From aee94f6923ba628a85d855d0c5316d0da78bfa2a Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht
Date: Tue, 19 Dec 2023 09:13:36 +0100
Subject: [PATCH 1/1] update ceph source to reef 18.2.1

Signed-off-by: Thomas Lamprecht
---
 ceph/.github/pull_request_template.md | 5 +- ceph/CMakeLists.txt | 2 +- ceph/PendingReleaseNotes | 55 + ceph/README.md | 92 +- ceph/ceph.spec | 6 +- ceph/changelog.upstream | 10 +- ceph/debian/ceph-base.docs | 1 - ceph/debian/ceph-mon.postinst | 1 + ceph/debian/ceph-osd.postinst | 1 + ceph/debian/compat | 2 +- ceph/debian/control | 20 +- ceph/debian/copyright | 556 +- ceph/debian/rules | 30 +- ceph/doc/architecture.rst | 641 +- ceph/doc/ceph-volume/lvm/activate.rst | 20 +- ceph/doc/cephadm/host-management.rst | 49 +- ceph/doc/cephadm/install.rst | 170 +- ceph/doc/cephadm/services/index.rst | 102 +- ceph/doc/cephadm/services/mds.rst | 14 +- ceph/doc/cephadm/services/mgr.rst | 4 +- ceph/doc/cephadm/services/nfs.rst | 47 + ceph/doc/cephadm/services/osd.rst | 81 +- ceph/doc/cephadm/services/rgw.rst | 30 +- ceph/doc/cephadm/troubleshooting.rst | 385 +- ceph/doc/cephfs/administration.rst | 65 +- ceph/doc/cephfs/cache-configuration.rst | 6 - ceph/doc/cephfs/fs-volumes.rst | 30 +- ceph/doc/cephfs/mount-using-fuse.rst | 2 +- ceph/doc/cephfs/multimds.rst | 14 + ceph/doc/cephfs/scrub.rst | 11 + ceph/doc/cephfs/snap-schedule.rst | 7 + ceph/doc/cephfs/troubleshooting.rst | 2 +- ceph/doc/dev/balancer-design.rst | 58 + ceph/doc/dev/cache-pool.rst | 200 - ceph/doc/dev/cephfs-mirroring.rst | 2 +- ceph/doc/dev/deduplication.rst | 2 +- ceph/doc/dev/developer_guide/dash-devel.rst | 4 +- .../developer_guide/running-tests-locally.rst | 2 +- ceph/doc/dev/mon-elections.rst | 2 + ceph/doc/dev/osd_internals/manifest.rst | 34 - .../osd_internals/mclock_wpq_cmp_study.rst | 23 - ceph/doc/dev/prim-balancer-design.rst | 53 - ceph/doc/dev/release-checklists.rst | 2 +- ceph/doc/glossary.rst | 23 +- ceph/doc/index.rst | 9 +- ceph/doc/install/get-packages.rst | 10 +- ceph/doc/man/8/ceph-monstore-tool.rst | 90 + ceph/doc/man/8/osdmaptool.rst | 37 + ceph/doc/man/8/radosgw-admin.rst | 184 +- ceph/doc/man/8/radosgw.rst | 38 +- ceph/doc/mgr/nfs.rst | 39 +- ceph/doc/mgr/prometheus.rst | 10 + ceph/doc/monitoring/index.rst | 474 + ceph/doc/rados/configuration/ceph-conf.rst | 19 +- .../configuration/filestore-config-ref.rst | 11 +- .../rados/configuration/mon-config-ref.rst | 35 +- ceph/doc/rados/configuration/msgr2.rst | 2 +- .../rados/configuration/osd-config-ref.rst | 25 +- .../configuration/pool-pg-config-ref.rst | 2 + ceph/doc/rados/operations/add-or-rm-mons.rst | 496 +- ceph/doc/rados/operations/balancer.rst | 4 +- ceph/doc/rados/operations/control.rst | 428 +- ceph/doc/rados/operations/crush-map.rst | 2 + .../rados/operations/erasure-code-profile.rst | 2 + ceph/doc/rados/operations/health-checks.rst | 29 +- ceph/doc/rados/operations/index.rst | 3 +- .../rados/operations/monitoring-osd-pg.rst | 34 +- .../doc/rados/operations/placement-groups.rst | 6 + ceph/doc/rados/operations/pools.rst | 218 +- ceph/doc/rados/operations/read-balancer.rst | 64 + ceph/doc/rados/operations/upmap.rst | 16 +- ceph/doc/rados/troubleshooting/community.rst | 25 +- .../rados/troubleshooting/cpu-profiling.rst | 65 +- ceph/doc/rados/troubleshooting/index.rst | 8 +- .../troubleshooting/memory-profiling.rst | 156 +- .../troubleshooting/troubleshooting-mon.rst | 644 +- .../troubleshooting/troubleshooting-osd.rst | 885 +- .../troubleshooting/troubleshooting-pg.rst | 660 +- ceph/doc/radosgw/admin.rst | 51 +- ceph/doc/radosgw/config-ref.rst 
| 13 +- ceph/doc/radosgw/dynamicresharding.rst | 87 +- ceph/doc/radosgw/multisite.rst | 18 +- ceph/doc/radosgw/notifications.rst | 3 +- .../radosgw/s3-notification-compatibility.rst | 2 +- ceph/doc/radosgw/s3.rst | 6 - ceph/doc/releases/index.rst | 8 +- ceph/doc/releases/reef.rst | 551 + ceph/doc/releases/releases.yml | 7 +- ceph/doc/start/documenting-ceph.rst | 23 +- ceph/doc/start/hardware-recommendations.rst | 391 +- ceph/doc/start/os-recommendations.rst | 46 +- ceph/install-deps.sh | 77 +- ceph/make-dist | 2 +- ceph/qa/cephfs/begin/1-ceph.yaml | 1 + .../cephfs/overrides/ignorelist_health.yaml | 1 + .../centos_8.yaml | 1 + .../centos_latest.yaml | 1 + .../supported-all-distro/centos_latest.yaml | 1 + .../supported-all-distro/ubuntu_20.04.yaml | 1 + .../supported-all-distro/ubuntu_latest.yaml | 2 +- .../basic/cachepool/none.yaml => rbd/conf/+} | 0 ceph/qa/rbd/conf/disable-pool-app.yaml | 5 + .../data-pool/ec.yaml} | 0 .../rbd/cli/pool => rbd/data-pool}/none.yaml | 0 .../data-pool/replicated.yaml} | 0 ceph/qa/rgw/ignore-pg-availability.yaml | 2 + ceph/qa/standalone/ceph-helpers.sh | 23 + .../mon-stretch/mon-stretch-fail-recovery.sh | 1 - .../mon-stretch-uneven-crush-weights.sh | 145 + .../qa/standalone/mon/mon-last-epoch-clean.sh | 2 +- ceph/qa/standalone/osd/divergent-priors.sh | 27 +- .../crimson-rados/basic/centos_8.stream.yaml | 1 + .../crimson-rados/basic/centos_latest.yaml | 1 - .../basic/crimson-supported-all-distro | 1 + .../crimson-rados/rbd/centos_8.stream.yaml | 1 + .../crimson-rados/rbd/centos_latest.yaml | 1 - .../rbd/crimson-supported-all-distro | 1 + .../none.yaml => crimson-rados/singleton/%} | 0 ceph/qa/suites/crimson-rados/singleton/.qa | 1 + .../qa/suites/crimson-rados/singleton/all/.qa | 1 + .../singleton/all/osd-backfill.yaml | 29 + .../singleton/crimson-supported-all-distro | 1 + .../singleton/crimson_qa_overrides.yaml | 1 + .../crimson-rados/singleton/objectstore | 1 + .../suites/crimson-rados/singleton/rados.yaml | 1 + .../crimson-rados/thrash/centos_8.stream.yaml | 1 + .../crimson-rados/thrash/centos_latest.yaml | 1 - .../thrash/crimson-supported-all-distro | 1 + .../qa/suites/fs/functional/tasks/damage.yaml | 1 + .../mirror-ha/cephfs-mirror/+} | 0 .../cephfs-mirror/1-volume-create-rm.yaml | 14 + ...-cluster.yaml => 2-three-per-cluster.yaml} | 0 .../workloads/cephfs-mirror-ha-workunit.yaml | 11 - .../fs/multiclient/tasks/ior-shared-file.yaml | 18 +- .../suites/fs/multiclient/tasks/mdtest.yaml | 32 +- .../{rbd/librbd/config/none.yaml => fs/nfs/%} | 0 .../singleton/msgr-failures => fs/nfs}/.qa | 0 .../pool/none.yaml => fs/nfs/cluster/+} | 0 .../basic/cachepool => fs/nfs/cluster}/.qa | 0 .../nfs/cluster/1-node.yaml} | 9 +- .../{rbd/cli/pool => fs/nfs/overrides}/.qa | 0 .../fs/nfs/overrides/ignorelist_health.yaml | 13 + .../nfs/supported-random-distros$} | 0 .../{rbd/cli_v1/pool => fs/nfs/tasks}/.qa | 0 ceph/qa/suites/fs/nfs/tasks/nfs.yaml | 4 + .../multifs/overrides/client-shutdown.yaml | 6 + .../workloads/overrides/client-shutdown.yaml | 6 + .../workload/tasks/5-workunit/postgres.yaml | 2 +- ceph/qa/suites/krbd/basic/conf.yaml | 1 + ceph/qa/suites/krbd/fsx/conf.yaml | 1 + ceph/qa/suites/krbd/ms_modeless/conf.yaml | 1 + ceph/qa/suites/krbd/rbd-nomount/conf.yaml | 1 + ceph/qa/suites/krbd/rbd/conf.yaml | 1 + .../singleton-msgr-failures/%} | 0 .../pool => krbd/singleton-msgr-failures}/.qa | 0 .../bluestore-bitmap.yaml | 1 + .../krbd/singleton-msgr-failures/conf.yaml | 7 + .../singleton-msgr-failures/ms_mode$}/.qa | 0 .../ms_mode$/crc-rxbounce.yaml | 5 + 
.../singleton-msgr-failures/ms_mode$/crc.yaml | 5 + .../ms_mode$/legacy-rxbounce.yaml | 5 + .../ms_mode$/legacy.yaml | 5 + .../ms_mode$/secure.yaml | 5 + .../msgr-failures}/.qa | 0 .../msgr-failures/few.yaml | 0 .../msgr-failures/many.yaml | 0 .../singleton-msgr-failures/tasks}/.qa | 0 .../tasks/rbd_xfstests.yaml | 0 ceph/qa/suites/krbd/singleton/conf.yaml | 1 + .../singleton/tasks/krbd_watch_errors.yaml | 19 + ceph/qa/suites/krbd/thrash/conf.yaml | 1 + ceph/qa/suites/krbd/unmap/conf.yaml | 2 + ceph/qa/suites/krbd/wac/sysfs/conf.yaml | 1 + ceph/qa/suites/krbd/wac/wac/conf.yaml | 1 + ceph/qa/suites/orch/cephadm/nfs | 1 + .../orchestrator_cli/orchestrator_cli.yaml | 3 +- .../2-services/nfs-haproxy-proto.yaml | 35 + .../smoke-roleless/2-services/nvmeof.yaml | 8 + .../workunits/task/test_ca_signed_key.yaml | 31 + .../perf-basic/objectstore/bluestore.yaml | 2 + .../rados/basic/tasks/rados_stress_watch.yaml | 1 + .../rados/basic/tasks/rados_striper.yaml | 4 + .../suites/rados/basic/tasks/readwrite.yaml | 2 + .../suites/rados/basic/tasks/repair_test.yaml | 1 + .../suites/rados/basic/tasks/scrub_test.yaml | 1 + .../rados/dashboard/tasks/dashboard.yaml | 2 + ceph/qa/suites/rados/mgr/tasks/crash.yaml | 1 + ceph/qa/suites/rados/mgr/tasks/failover.yaml | 1 + ceph/qa/suites/rados/mgr/tasks/insights.yaml | 1 + .../rados/mgr/tasks/module_selftest.yaml | 1 + .../mgr/tasks/per_module_finisher_stats.yaml | 2 + ceph/qa/suites/rados/mgr/tasks/progress.yaml | 1 + .../qa/suites/rados/mgr/tasks/prometheus.yaml | 1 + ceph/qa/suites/rados/mgr/tasks/workunits.yaml | 1 + .../monthrash/thrashers/force-sync-many.yaml | 1 + .../rados/monthrash/thrashers/many.yaml | 1 + .../suites/rados/monthrash/thrashers/one.yaml | 1 + .../rados/monthrash/thrashers/sync-many.yaml | 1 + .../rados/monthrash/thrashers/sync.yaml | 1 + .../workloads/pool-create-delete.yaml | 1 - .../rados/monthrash/workloads/rados_5925.yaml | 1 - .../monthrash/workloads/rados_api_tests.yaml | 1 - .../multimon/tasks/mon_clock_no_skews.yaml | 1 + .../multimon/tasks/mon_clock_with_skews.yaml | 1 + .../rados/multimon/tasks/mon_recovery.yaml | 1 + .../backends/objectcacher-stress.yaml | 2 + ceph/qa/suites/rados/perf/ceph.yaml | 1 + ceph/qa/suites/rados/rest/mgr-restful.yaml | 1 + .../all/admin_socket_output.yaml | 1 + .../rados/singleton-nomsgr/all/balancer.yaml | 1 + .../singleton-nomsgr/all/cache-fs-trunc.yaml | 1 + .../all/ceph-kvstore-tool.yaml | 2 + .../all/export-after-evict.yaml | 1 + .../singleton-nomsgr/all/full-tiering.yaml | 1 + .../singleton-nomsgr/all/health-warnings.yaml | 1 + .../all/multi-backfill-reject.yaml | 1 + .../singleton-nomsgr/all/pool-access.yaml | 2 + .../rados/singleton/all/admin-socket.yaml | 2 + .../rados/singleton/all/backfill-toofull.yaml | 1 + .../rados/singleton/all/dump-stuck.yaml | 1 + .../singleton/all/ec-inconsistent-hinfo.yaml | 1 + .../rados/singleton/all/ec-lost-unfound.yaml | 1 + .../singleton/all/lost-unfound-delete.yaml | 1 + .../rados/singleton/all/lost-unfound.yaml | 1 + .../all/max-pg-per-osd.from-mon.yaml | 1 + .../all/max-pg-per-osd.from-primary.yaml | 1 + .../all/max-pg-per-osd.from-replica.yaml | 1 + .../rados/singleton/all/mon-auth-caps.yaml | 1 + .../singleton/all/mon-config-key-caps.yaml | 1 + .../rados/singleton/all/mon-config.yaml | 2 + .../rados/singleton/all/osd-backfill.yaml | 1 + .../all/osd-recovery-incomplete.yaml | 1 + .../rados/singleton/all/osd-recovery.yaml | 1 + ceph/qa/suites/rados/singleton/all/peer.yaml | 1 + .../all/pg-autoscaler-progress-off.yaml | 1 + 
.../rados/singleton/all/pg-autoscaler.yaml | 1 + .../all/pg-removal-interruption.yaml | 1 + .../rados/singleton/all/rebuild-mondb.yaml | 1 + .../rados/singleton/all/test-crash.yaml | 1 + .../singleton/all/test-noautoscale-flag.yaml | 1 + .../singleton/all/thrash-backfill-full.yaml | 1 + .../rados/singleton/all/thrash-eio.yaml | 1 + .../all/thrash-rados/thrash-rados.yaml | 1 + .../thrash_cache_writeback_proxy_none.yaml | 1 + .../all/watch-notify-same-primary.yaml | 1 + .../thrashers/careful.yaml | 1 + .../thrashers/default.yaml | 1 + .../thrashers/fastread.yaml | 1 + .../thrashers/mapgap.yaml | 1 + .../thrashers/morepggrow.yaml | 1 + .../thrashers/pggrow.yaml | 1 + .../thrashers/careful.yaml | 1 + .../thrashers/default.yaml | 1 + .../thrashers/careful.yaml | 1 + .../thrashers/default.yaml | 1 + .../thrashers/fastread.yaml | 1 + .../thrashers/minsize_recovery.yaml | 1 + .../thrashers/morepggrow.yaml | 1 + .../thrash-erasure-code/thrashers/pggrow.yaml | 1 + .../thrash-old-clients/thrashers/careful.yaml | 1 + .../thrash-old-clients/thrashers/default.yaml | 1 + .../thrash-old-clients/thrashers/mapgap.yaml | 1 + .../thrashers/morepggrow.yaml | 1 + .../thrash-old-clients/thrashers/pggrow.yaml | 1 + .../admin_socket_objecter_requests.yaml | 2 + .../thrash/workloads/cache-agent-big.yaml | 1 + .../thrash/workloads/cache-agent-small.yaml | 1 + .../workloads/cache-pool-snaps-readproxy.yaml | 1 + .../thrash/workloads/cache-pool-snaps.yaml | 1 + .../workloads/cache-snaps-balanced.yaml | 1 + .../rados/thrash/workloads/cache-snaps.yaml | 1 + .../suites/rados/thrash/workloads/cache.yaml | 1 + .../thrash/workloads/dedup-io-mixed.yaml | 4 + .../thrash/workloads/dedup-io-snaps.yaml | 4 + .../workloads/pool-snaps-few-objects.yaml | 3 + .../radosbench-high-concurrency.yaml | 2 + .../rados/thrash/workloads/radosbench.yaml | 2 + .../rados/thrash/workloads/redirect.yaml | 4 + .../workloads/redirect_promote_tests.yaml | 4 + .../thrash/workloads/redirect_set_object.yaml | 4 + .../thrash/workloads/set-chunks-read.yaml | 4 + .../workloads/small-objects-balanced.yaml | 2 + .../workloads/small-objects-localized.yaml | 2 + .../rados/thrash/workloads/small-objects.yaml | 2 + .../workloads/snaps-few-objects-balanced.yaml | 4 + .../snaps-few-objects-localized.yaml | 4 + .../thrash/workloads/snaps-few-objects.yaml | 4 + .../workloads/write_fadvise_dontneed.yaml | 4 + .../suites/rados/valgrind-leaks/1-start.yaml | 1 + .../rados/verify/tasks/rados_cls_all.yaml | 2 + ceph/qa/suites/rbd/basic/cachepool/small.yaml | 17 - ceph/qa/suites/rbd/basic/conf | 1 + .../basic/tasks/rbd_api_tests_old_format.yaml | 1 - ceph/qa/suites/rbd/cli/conf | 1 + ceph/qa/suites/rbd/cli/data-pool | 1 + ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml | 27 - .../suites/rbd/cli/pool/small-cache-pool.yaml | 17 - .../rbd_support_module_recovery.yaml | 13 + ceph/qa/suites/rbd/cli_v1/conf | 1 + .../rbd/cli_v1/pool/small-cache-pool.yaml | 17 - ceph/qa/suites/rbd/encryption/conf | 1 + ceph/qa/suites/rbd/encryption/data-pool | 1 + .../rbd/encryption/pool/ec-cache-pool.yaml | 21 - .../encryption/pool/replicated-data-pool.yaml | 11 - .../rbd/encryption/pool/small-cache-pool.yaml | 17 - .../qa/suites/rbd/immutable-object-cache/conf | 1 + ceph/qa/suites/rbd/iscsi/conf | 1 + ceph/qa/suites/rbd/librbd/conf | 1 + ceph/qa/suites/rbd/librbd/data-pool | 1 + .../rbd/{qemu/pool => librbd/extra-conf}/.qa | 0 .../{config => extra-conf}/copy-on-read.yaml | 0 .../pool => librbd/extra-conf}/none.yaml | 0 .../permit-partial-discard.yaml | 0 
.../suites/rbd/librbd/pool/ec-data-pool.yaml | 24 - .../rbd/librbd/pool/replicated-data-pool.yaml | 11 - .../rbd/librbd/pool/small-cache-pool.yaml | 17 - .../rbd/librbd/workloads/c_api_tests.yaml | 1 - .../workloads/c_api_tests_with_defaults.yaml | 1 - .../c_api_tests_with_journaling.yaml | 1 - ceph/qa/suites/rbd/maintenance/conf | 1 + ceph/qa/suites/rbd/migration/5-data-pool | 1 + .../rbd/migration/5-pool/ec-data-pool.yaml | 24 - .../5-pool/replicated-data-pool.yaml | 11 - ceph/qa/suites/rbd/migration/conf | 1 + ceph/qa/suites/rbd/mirror-thrash/conf | 1 + ceph/qa/suites/rbd/mirror/conf | 1 + ceph/qa/suites/rbd/nbd/conf | 1 + ceph/qa/suites/rbd/pwl-cache/home/conf | 1 + ceph/qa/suites/rbd/pwl-cache/tmpfs/conf | 1 + ceph/qa/suites/rbd/qemu/conf | 1 + ceph/qa/suites/rbd/qemu/data-pool | 1 + .../suites/rbd/qemu/pool/ec-cache-pool.yaml | 21 - .../qa/suites/rbd/qemu/pool/ec-data-pool.yaml | 24 - .../rbd/qemu/pool/replicated-data-pool.yaml | 11 - .../rbd/qemu/pool/small-cache-pool.yaml | 17 - .../singleton-bluestore/all/issue-20295.yaml | 2 - ceph/qa/suites/rbd/singleton-bluestore/conf | 1 + .../suites/rbd/singleton/all/rbd_mirror.yaml | 1 - .../suites/rbd/singleton/all/rbd_tasks.yaml | 4 - ceph/qa/suites/rbd/singleton/conf | 1 + ceph/qa/suites/rbd/thrash/conf | 1 + .../qa/suites/rbd/thrash/thrashers/cache.yaml | 21 - .../rbd/thrash/workloads/rbd_api_tests.yaml | 1 - .../workloads/rbd_api_tests_copy_on_read.yaml | 1 - .../workloads/rbd_api_tests_journaling.yaml | 1 - .../workloads/rbd_api_tests_no_locking.yaml | 1 - ceph/qa/suites/rbd/valgrind/conf | 1 + .../rbd/valgrind/workloads/c_api_tests.yaml | 1 - .../workloads/c_api_tests_with_defaults.yaml | 1 - .../c_api_tests_with_journaling.yaml | 1 - .../rbd/valgrind/workloads/rbd_mirror.yaml | 1 - .../ignore-pg-availability.yaml | 1 + ceph/qa/suites/rgw/crypt/2-kms/barbican.yaml | 10 +- ceph/qa/suites/rgw/crypt/ubuntu_latest.yaml | 1 + .../rgw/dbstore/ignore-pg-availability.yaml | 1 + .../hadoop-s3a/ignore-pg-availability.yaml | 1 + .../rgw/lifecycle/ignore-pg-availability.yaml | 1 + .../notifications/ignore-pg-availability.yaml | 1 + .../service-token/ignore-pg-availability.yaml | 1 + .../rgw/tempest/ignore-pg-availability.yaml | 1 + .../rgw/thrash/ignore-pg-availability.yaml | 1 + .../rgw/tools/ignore-pg-availability.yaml | 1 + .../upgrade/1-install/pacific/overrides.yaml | 2 +- .../upgrade/1-install/quincy/overrides.yaml | 2 +- .../rgw/upgrade/ignore-pg-availability.yaml | 1 + .../suites/rgw/verify/tasks/bucket-check.yaml | 5 + .../suites/rgw/verify/tasks/mp_reupload.yaml | 5 + .../rgw/website/ignore-pg-availability.yaml | 1 + .../suites/smoke/basic/supported-all-distro | 1 + .../smoke/basic/supported-random-distro$ | 1 - .../test/cfuse_workunit_suites_blogbench.yaml | 2 + .../test/cfuse_workunit_suites_fsstress.yaml | 2 + .../test/cfuse_workunit_suites_iozone.yaml | 2 + .../tasks/test/cfuse_workunit_suites_pjd.yaml | 2 + .../test/kclient_workunit_direct_io.yaml | 2 + .../test/kclient_workunit_suites_dbench.yaml | 2 + .../kclient_workunit_suites_fsstress.yaml | 2 + .../test/kclient_workunit_suites_pjd.yaml | 2 + .../tasks/test/libcephfs_interface_tests.yaml | 2 + .../smoke/basic/tasks/test/rados_cls_all.yaml | 2 + .../tasks/test/rbd_cli_import_export.yaml | 2 + .../tasks/test/rbd_python_api_tests.yaml | 2 + .../test/rbd_workunit_suites_iozone.yaml | 2 + .../basic/tasks/test/rgw_ec_s3tests.yaml | 2 + .../smoke/basic/tasks/test/rgw_s3tests.yaml | 2 + .../parallel/workload/test_rbd_api.yaml | 2 + .../parallel/workload/test_rbd_python.yaml | 2 + 
.../2-first-half-tasks/rbd_api.yaml | 2 + .../stress-split/3-stress-tasks/rbd_api.yaml | 2 + .../0-cluster/start.yaml | 1 + .../parallel/workload/test_rbd_api.yaml | 2 + .../parallel/workload/test_rbd_python.yaml | 2 + .../2-first-half-tasks/rbd_api.yaml | 2 + .../stress-split/3-stress-tasks/rbd_api.yaml | 2 + ceph/qa/tasks/barbican.py | 5 +- ceph/qa/tasks/cephadm.py | 115 +- ceph/qa/tasks/cephfs/cephfs_test_case.py | 6 + ceph/qa/tasks/cephfs/kernel_mount.py | 5 +- ceph/qa/tasks/cephfs/mount.py | 59 +- ceph/qa/tasks/cephfs/test_admin.py | 30 +- ceph/qa/tasks/cephfs/test_client_limits.py | 97 +- ceph/qa/tasks/cephfs/test_client_recovery.py | 20 +- ceph/qa/tasks/cephfs/test_damage.py | 12 +- ceph/qa/tasks/cephfs/test_failover.py | 28 +- ceph/qa/tasks/cephfs/test_mirroring.py | 35 + ceph/qa/tasks/cephfs/test_misc.py | 23 + ceph/qa/tasks/cephfs/test_scrub_checks.py | 44 + ceph/qa/tasks/cephfs/test_snap_schedules.py | 86 +- ceph/qa/tasks/cephfs/test_volumes.py | 36 +- ceph/qa/tasks/cephfs/xfstests_dev.py | 4 +- ceph/qa/tasks/mgr/dashboard/test_pool.py | 2 + ceph/qa/tasks/mgr/mgr_test_case.py | 8 + ceph/qa/tasks/mgr/test_failover.py | 34 + ceph/qa/valgrind.supp | 12 + ceph/qa/workunits/cephadm/test_cephadm.sh | 123 +- ceph/qa/workunits/cephtool/test.sh | 2 +- ceph/qa/workunits/mon/pg_autoscaler.sh | 10 + .../qa/workunits/mon/test_noautoscale_flag.sh | 25 +- ceph/qa/workunits/rbd/krbd_watch_errors.sh | 53 + ceph/qa/workunits/rbd/rbd-nbd.sh | 10 + ceph/qa/workunits/rbd/rbd_mirror_helpers.sh | 10 + ceph/qa/workunits/rbd/rbd_mirror_journal.sh | 24 +- ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh | 27 +- .../rbd/rbd_support_module_recovery.sh | 77 + ceph/qa/workunits/rgw/common.py | 46 + ceph/qa/workunits/rgw/run-bucket-check.sh | 19 + .../qa/workunits/rgw/test_rgw_bucket_check.py | 194 + ceph/qa/workunits/rgw/test_rgw_reshard.py | 26 +- .../workunits/rgw/test_rgw_s3_mp_reupload.py | 121 + .../workunits/rgw/test_rgw_s3_mp_reupload.sh | 110 + ceph/src/.git_version | 4 +- ceph/src/CMakeLists.txt | 2 +- ceph/src/SimpleRADOSStriper.cc | 14 +- ceph/src/ceph-volume/ceph_volume/api/lvm.py | 37 + .../ceph_volume/devices/lvm/deactivate.py | 2 +- .../ceph_volume/devices/lvm/migrate.py | 44 +- .../ceph_volume/devices/lvm/prepare.py | 14 +- .../ceph_volume/devices/lvm/zap.py | 5 +- .../ceph_volume/devices/raw/common.py | 6 + .../ceph_volume/devices/raw/list.py | 67 +- .../ceph_volume/devices/raw/prepare.py | 17 +- .../ceph-volume/ceph_volume/inventory/main.py | 13 +- .../ceph_volume/tests/api/test_lvm.py | 12 + .../tests/devices/lvm/test_deactivate.py | 2 +- .../tests/devices/lvm/test_migrate.py | 450 + .../ceph_volume/tests/util/test_disk.py | 40 +- .../ceph-volume/ceph_volume/util/device.py | 33 +- ceph/src/ceph-volume/ceph_volume/util/disk.py | 57 +- .../ceph_volume/util/encryption.py | 27 +- ceph/src/cephadm/CMakeLists.txt | 8 +- ceph/src/cephadm/build.py | 41 +- ceph/src/cephadm/cephadm.py | 1340 +- ceph/src/cephadm/tests/fixtures.py | 12 +- ceph/src/cephadm/tests/test_agent.py | 4 +- ceph/src/cephadm/tests/test_cephadm.py | 92 +- ceph/src/cephadm/tests/test_util_funcs.py | 155 +- ceph/src/client/Client.cc | 424 +- ceph/src/client/Client.h | 30 +- ceph/src/client/Dentry.h | 1 + ceph/src/client/MetaRequest.cc | 3 + ceph/src/client/MetaRequest.h | 9 + ceph/src/client/MetaSession.h | 7 + ceph/src/cls/rgw/cls_rgw.cc | 195 +- ceph/src/cls/rgw/cls_rgw_types.cc | 50 +- ceph/src/common/Formatter.cc | 53 +- ceph/src/common/Formatter.h | 5 + ceph/src/common/TrackedOp.cc | 12 +- ceph/src/common/TrackedOp.h | 65 +- 
ceph/src/common/ceph_strings.cc | 1 + ceph/src/common/options/mds.yaml.in | 22 +- ceph/src/common/options/rgw.yaml.in | 2 +- ceph/src/crimson/admin/admin_socket.cc | 8 + ceph/src/crimson/common/config_proxy.h | 11 +- ceph/src/crimson/common/errorator.h | 4 +- ceph/src/crimson/common/operation.h | 45 +- ceph/src/crimson/mgr/client.cc | 7 +- ceph/src/crimson/mgr/client.h | 3 +- ceph/src/crimson/net/Connection.h | 24 +- ceph/src/crimson/net/Dispatcher.h | 20 +- ceph/src/crimson/net/FrameAssemblerV2.cc | 295 +- ceph/src/crimson/net/FrameAssemblerV2.h | 119 +- ceph/src/crimson/net/Fwd.h | 4 +- ceph/src/crimson/net/Interceptor.h | 47 +- ceph/src/crimson/net/Messenger.cc | 6 +- ceph/src/crimson/net/Messenger.h | 3 +- ceph/src/crimson/net/ProtocolV2.cc | 1232 +- ceph/src/crimson/net/ProtocolV2.h | 67 +- ceph/src/crimson/net/Socket.cc | 394 +- ceph/src/crimson/net/Socket.h | 273 +- ceph/src/crimson/net/SocketConnection.cc | 131 +- ceph/src/crimson/net/SocketConnection.h | 133 +- ceph/src/crimson/net/SocketMessenger.cc | 64 +- ceph/src/crimson/net/SocketMessenger.h | 106 +- ceph/src/crimson/net/chained_dispatchers.cc | 35 +- ceph/src/crimson/net/chained_dispatchers.h | 13 +- ceph/src/crimson/net/io_handler.cc | 989 +- ceph/src/crimson/net/io_handler.h | 455 +- ceph/src/crimson/os/futurized_store.h | 2 - ceph/src/crimson/os/seastore/CMakeLists.txt | 3 +- ceph/src/crimson/os/seastore/async_cleaner.cc | 4 +- .../seastore/backref/btree_backref_manager.cc | 84 +- .../seastore/backref/btree_backref_manager.h | 9 +- .../src/crimson/os/seastore/backref_manager.h | 3 - .../os/seastore/btree/btree_range_pin.h | 44 +- .../os/seastore/btree/fixed_kv_btree.h | 95 +- .../crimson/os/seastore/btree/fixed_kv_node.h | 4 +- ceph/src/crimson/os/seastore/cache.cc | 34 +- ceph/src/crimson/os/seastore/cache.h | 224 +- ceph/src/crimson/os/seastore/cached_extent.h | 151 +- .../collection_flat_node.cc | 10 +- .../collection_manager/collection_flat_node.h | 14 +- .../flat_collection_manager.cc | 1 - .../journal/circular_bounded_journal.cc | 362 +- .../journal/circular_bounded_journal.h | 61 +- .../journal/circular_journal_space.cc | 31 +- .../seastore/journal/circular_journal_space.h | 8 +- ceph/src/crimson/os/seastore/lba_manager.h | 36 +- .../lba_manager/btree/btree_lba_manager.cc | 268 +- .../lba_manager/btree/btree_lba_manager.h | 219 +- .../lba_manager/btree/lba_btree_node.cc | 11 +- .../lba_manager/btree/lba_btree_node.h | 42 +- .../os/seastore/object_data_handler.cc | 652 +- .../crimson/os/seastore/object_data_handler.h | 19 +- .../btree/omap_btree_node_impl.cc | 11 +- .../btree/string_kv_node_layout.h | 2 +- ceph/src/crimson/os/seastore/onode.h | 1 + .../staged-fltree/fltree_onode_manager.cc | 17 +- .../staged-fltree/fltree_onode_manager.h | 3 + .../os/seastore/random_block_manager.h | 38 + .../random_block_manager/block_rb_manager.cc | 23 +- .../random_block_manager/block_rb_manager.h | 10 +- .../random_block_manager/nvme_block_device.cc | 20 +- .../random_block_manager/nvme_block_device.h | 19 +- .../random_block_manager/rbm_device.cc | 57 +- .../random_block_manager/rbm_device.h | 30 +- .../src/crimson/os/seastore/record_scanner.cc | 239 + ceph/src/crimson/os/seastore/record_scanner.h | 83 + ceph/src/crimson/os/seastore/root_block.h | 2 +- ceph/src/crimson/os/seastore/seastore.cc | 81 +- ceph/src/crimson/os/seastore/seastore.h | 4 + .../src/crimson/os/seastore/seastore_types.cc | 17 +- ceph/src/crimson/os/seastore/seastore_types.h | 116 +- .../crimson/os/seastore/segment_manager.cc | 20 +- 
.../src/crimson/os/seastore/segment_manager.h | 2 +- .../segment_manager/{zns.cc => zbd.cc} | 255 +- .../seastore/segment_manager/{zns.h => zbd.h} | 48 +- .../os/seastore/segment_manager_group.cc | 234 +- .../os/seastore/segment_manager_group.h | 59 +- .../os/seastore/transaction_manager.cc | 36 +- .../crimson/os/seastore/transaction_manager.h | 344 +- ceph/src/crimson/osd/CMakeLists.txt | 1 + ceph/src/crimson/osd/heartbeat.cc | 193 +- ceph/src/crimson/osd/heartbeat.h | 75 +- ceph/src/crimson/osd/lsan_suppressions.cc | 20 + ceph/src/crimson/osd/main.cc | 3 +- .../osd/main_config_bootstrap_helpers.cc | 3 +- ceph/src/crimson/osd/object_context_loader.cc | 21 +- ceph/src/crimson/osd/object_context_loader.h | 2 +- ceph/src/crimson/osd/osd.cc | 352 +- ceph/src/crimson/osd/osd.h | 52 +- ceph/src/crimson/osd/osd_meta.cc | 4 +- .../osd/osd_operations/background_recovery.cc | 4 +- .../osd/osd_operations/background_recovery.h | 2 +- .../osd/osd_operations/client_request.cc | 25 +- .../osd/osd_operations/client_request.h | 2 +- .../osd_operations/internal_client_request.cc | 11 +- .../osd_operations/internal_client_request.h | 2 +- .../osd/osd_operations/logmissing_request.cc | 15 +- .../osd/osd_operations/logmissing_request.h | 4 +- .../logmissing_request_reply.cc | 2 +- .../osd_operations/logmissing_request_reply.h | 2 +- .../osd/osd_operations/peering_event.cc | 6 +- .../osd/osd_operations/peering_event.h | 2 +- .../osd/osd_operations/pg_advance_map.cc | 24 +- .../osd/osd_operations/pg_advance_map.h | 3 + .../osd/osd_operations/replicated_request.cc | 6 +- .../osd/osd_operations/replicated_request.h | 2 +- .../osd/osd_operations/snaptrim_event.cc | 72 +- .../osd/osd_operations/snaptrim_event.h | 8 +- ceph/src/crimson/osd/osdmap_gate.cc | 4 + ceph/src/crimson/osd/pg.cc | 3 +- ceph/src/crimson/osd/pg.h | 15 + ceph/src/crimson/osd/pg_backend.cc | 20 +- ceph/src/crimson/osd/pg_map.h | 91 +- ceph/src/crimson/osd/pg_shard_manager.cc | 55 +- ceph/src/crimson/osd/pg_shard_manager.h | 123 +- ceph/src/crimson/osd/shard_services.cc | 32 +- ceph/src/crimson/osd/shard_services.h | 29 +- ceph/src/crimson/osd/state.h | 58 +- ceph/src/crimson/osd/watch.cc | 58 +- ceph/src/crimson/osd/watch.h | 2 + ceph/src/crimson/tools/CMakeLists.txt | 4 + ceph/src/crimson/tools/perf_async_msgr.cc | 13 +- ceph/src/crimson/tools/perf_crimson_msgr.cc | 962 +- ceph/src/exporter/DaemonMetricCollector.cc | 56 +- ceph/src/exporter/DaemonMetricCollector.h | 3 +- ceph/src/include/ceph_fs.h | 79 +- ceph/src/include/cephfs/libcephfs.h | 53 + ceph/src/include/compat.h | 3 - ceph/src/include/rados.h | 1 + ceph/src/include/win32/fs_compat.h | 3 + ceph/src/libcephfs.cc | 120 + ceph/src/libcephsqlite.cc | 228 +- ceph/src/librbd/ImageWatcher.cc | 3 +- ceph/src/librbd/ManagedLock.cc | 6 +- .../mirror/snapshot/CreatePrimaryRequest.cc | 91 +- .../librbd/operation/SnapshotRemoveRequest.cc | 5 +- ceph/src/mds/BatchOp.h | 2 +- ceph/src/mds/CDentry.cc | 8 +- ceph/src/mds/CDentry.h | 21 +- ceph/src/mds/CDir.cc | 4 +- ceph/src/mds/CDir.h | 4 +- ceph/src/mds/CInode.cc | 4 +- ceph/src/mds/CInode.h | 7 +- ceph/src/mds/Capability.cc | 45 + ceph/src/mds/Capability.h | 29 +- ceph/src/mds/FSMap.h | 12 +- ceph/src/mds/FSMapUser.cc | 2 +- ceph/src/mds/FSMapUser.h | 4 +- ceph/src/mds/Locker.cc | 1 - ceph/src/mds/MDCache.cc | 98 +- ceph/src/mds/MDCache.h | 5 +- ceph/src/mds/MDLog.cc | 16 +- ceph/src/mds/MDSAuthCaps.cc | 18 +- ceph/src/mds/MDSAuthCaps.h | 6 +- ceph/src/mds/MDSCacheObject.h | 10 +- ceph/src/mds/MDSDaemon.cc | 15 +- ceph/src/mds/MDSDaemon.h | 2 +- 
ceph/src/mds/MDSRank.cc | 50 +- ceph/src/mds/MDSRank.h | 34 +- ceph/src/mds/Migrator.cc | 2 - ceph/src/mds/Mutation.cc | 142 +- ceph/src/mds/Mutation.h | 16 +- ceph/src/mds/ScrubHeader.h | 6 +- ceph/src/mds/ScrubStack.cc | 43 +- ceph/src/mds/ScrubStack.h | 2 + ceph/src/mds/Server.cc | 779 +- ceph/src/mds/Server.h | 40 +- ceph/src/mds/SessionMap.cc | 79 +- ceph/src/mds/SessionMap.h | 8 +- ceph/src/mds/SimpleLock.cc | 5 +- ceph/src/mds/SimpleLock.h | 1 + ceph/src/mds/SnapRealm.cc | 2 +- ceph/src/mds/StrayManager.cc | 40 +- ceph/src/mds/cephfs_features.cc | 1 + ceph/src/mds/cephfs_features.h | 6 +- ceph/src/mds/mdstypes.cc | 4 + ceph/src/mds/mdstypes.h | 1 + ceph/src/messages/MClientRequest.h | 29 +- ceph/src/messages/MDentryUnlink.h | 60 +- ceph/src/mgr/ActivePyModules.cc | 6 +- ceph/src/mgr/ActivePyModules.h | 2 +- ceph/src/mgr/BaseMgrModule.cc | 17 +- ceph/src/mgr/DaemonServer.cc | 9 +- ceph/src/mgr/DaemonServer.h | 3 +- ceph/src/mgr/Mgr.cc | 2 +- ceph/src/mgr/MgrClient.cc | 35 +- ceph/src/mgr/PyFormatter.cc | 5 + ceph/src/mgr/PyFormatter.h | 1 + ceph/src/mgr/PyModuleRegistry.h | 8 +- ceph/src/mon/AuthMonitor.cc | 4 +- ceph/src/mon/ConfigMonitor.cc | 1 + ceph/src/mon/ElectionLogic.cc | 3 +- ceph/src/mon/FSCommands.cc | 8 + ceph/src/mon/MDSMonitor.cc | 6 +- ceph/src/mon/MonClient.cc | 11 +- ceph/src/mon/MonCommands.h | 4 +- ceph/src/mon/MonMap.cc | 1 + ceph/src/mon/MonOpRequest.h | 2 +- ceph/src/mon/Monitor.cc | 21 +- ceph/src/mon/Monitor.h | 2 +- ceph/src/mon/OSDMonitor.cc | 613 +- ceph/src/mon/PGMap.cc | 18 +- ceph/src/msg/Dispatcher.h | 7 +- ceph/src/msg/Message.cc | 3 - ceph/src/msg/Message.h | 1 - ceph/src/msg/async/AsyncConnection.cc | 3 + ceph/src/msg/async/AsyncConnection.h | 1 + ceph/src/msg/async/ProtocolV1.cc | 1 + ceph/src/msg/async/ProtocolV2.cc | 1 + ceph/src/msg/async/Stack.h | 46 +- ceph/src/os/bluestore/AvlAllocator.cc | 6 +- ceph/src/os/bluestore/BlueFS.cc | 22 +- ceph/src/os/bluestore/BlueStore.cc | 2 +- ceph/src/os/bluestore/BtreeAllocator.cc | 6 +- ceph/src/os/bluestore/StupidAllocator.cc | 32 +- ceph/src/os/bluestore/StupidAllocator.h | 4 - ceph/src/os/bluestore/bluestore_tool.cc | 10 +- .../os/bluestore/fastbmap_allocator_impl.cc | 16 +- ceph/src/osd/OSD.cc | 6 +- ceph/src/osd/OSD.h | 4 +- ceph/src/osd/OSDMap.cc | 42 +- ceph/src/osd/OpRequest.cc | 3 +- ceph/src/osd/OpRequest.h | 11 +- ceph/src/osd/object_state_fmt.h | 23 + ceph/src/osd/scheduler/mClockScheduler.cc | 3 +- ceph/src/osd/scheduler/mClockScheduler.h | 33 +- ceph/src/perfglue/CMakeLists.txt | 2 +- ceph/src/pybind/cephfs/c_cephfs.pxd | 15 + ceph/src/pybind/cephfs/cephfs.pyx | 88 +- ceph/src/pybind/cephfs/mock_cephfs.pxi | 12 + ceph/src/pybind/mgr/ceph_module.pyi | 4 +- ceph/src/pybind/mgr/cephadm/agent.py | 2 +- ceph/src/pybind/mgr/cephadm/exchange.py | 164 + ceph/src/pybind/mgr/cephadm/inventory.py | 60 +- ceph/src/pybind/mgr/cephadm/migrations.py | 3 +- ceph/src/pybind/mgr/cephadm/module.py | 273 +- ceph/src/pybind/mgr/cephadm/schedule.py | 4 +- ceph/src/pybind/mgr/cephadm/serve.py | 184 +- .../mgr/cephadm/services/cephadmservice.py | 33 +- .../pybind/mgr/cephadm/services/ingress.py | 99 +- .../pybind/mgr/cephadm/services/monitoring.py | 43 +- ceph/src/pybind/mgr/cephadm/services/nfs.py | 40 +- .../src/pybind/mgr/cephadm/services/nvmeof.py | 93 + ceph/src/pybind/mgr/cephadm/services/osd.py | 3 +- ceph/src/pybind/mgr/cephadm/ssh.py | 59 +- .../templates/services/ingress/haproxy.cfg.j2 | 3 + .../services/ingress/keepalived.conf.j2 | 12 +- .../templates/services/nfs/ganesha.conf.j2 | 3 + 
.../services/nvmeof/ceph-nvmeof.conf.j2 | 34 + .../pybind/mgr/cephadm/tests/test_cephadm.py | 594 +- .../pybind/mgr/cephadm/tests/test_services.py | 1446 +- ceph/src/pybind/mgr/cephadm/tests/test_ssh.py | 49 + .../mgr/cephadm/tests/test_tuned_profiles.py | 35 + .../pybind/mgr/cephadm/tests/test_upgrade.py | 25 + ceph/src/pybind/mgr/cephadm/tuned_profiles.py | 4 +- ceph/src/pybind/mgr/cephadm/upgrade.py | 15 +- ceph/src/pybind/mgr/cephadm/utils.py | 19 +- .../ci/cephadm/run-cephadm-e2e-tests.sh | 2 + .../mgr/dashboard/controllers/cephfs.py | 207 +- .../mgr/dashboard/controllers/cluster.py | 84 +- .../mgr/dashboard/controllers/daemon.py | 18 +- .../pybind/mgr/dashboard/controllers/host.py | 101 +- .../dashboard/controllers/perf_counters.py | 2 +- .../mgr/dashboard/controllers/prometheus.py | 32 +- .../pybind/mgr/dashboard/controllers/rbd.py | 172 +- .../dashboard/controllers/rbd_mirroring.py | 17 +- .../pybind/mgr/dashboard/controllers/rgw.py | 295 +- ceph/src/pybind/mgr/dashboard/frontend/.npmrc | 1 + .../mgr/dashboard/frontend/CMakeLists.txt | 4 +- .../mgr/dashboard/frontend/angular.json | 5 +- .../mgr/dashboard/frontend/cypress.config.ts | 27 +- .../frontend/cypress/e2e/cluster/hosts.po.ts | 1 + .../frontend/cypress/e2e/cluster/logs.po.ts | 49 +- .../e2e/common/01-global.feature.po.ts | 187 - .../e2e/common/forms-helper.feature.po.ts | 77 + .../cypress/e2e/common/global.feature.po.ts | 40 + .../e2e/common/table-helper.feature.po.ts | 135 + .../frontend/cypress/e2e/common/urls.po.ts | 6 +- .../filesystems/filesystems.e2e-spec.feature | 30 + .../e2e/filesystems/filesystems.e2e-spec.ts | 16 - .../cypress/e2e/filesystems/filesystems.po.ts | 5 - .../subvolume-groups.e2e-spec.feature | 51 + .../filesystems/subvolumes.e2e-spec.feature | 51 + .../e2e/orchestrator/01-hosts.e2e-spec.ts | 24 - .../02-create-cluster-add-host.feature | 6 +- .../frontend/cypress/e2e/rgw/buckets.po.ts | 41 +- .../cypress/e2e/ui/dashboard-v3.e2e-spec.ts | 2 +- .../cypress/e2e/ui/notification.e2e-spec.ts | 2 +- .../frontend/cypress/support/commands.ts | 10 +- .../dist/en-US/119.066087561586659c.js | 1 + .../dist/en-US/25.9d84971ea743706b.js | 1 + .../frontend/dist/en-US/3rdpartylicenses.txt | 195 +- .../dist/en-US/43.cf51dac96ed4b14e.js | 1 - .../dist/en-US/543.eec5c8f9f29060da.js | 1 - .../dist/en-US/803.08339784f3bb5d16.js | 1 + .../dist/en-US/95.1ae8f43a396d3fea.js | 1 - .../dashboard/frontend/dist/en-US/index.html | 4 +- .../dist/en-US/main.871e04c0fd27227d.js | 3 + .../dist/en-US/main.8be028f171baab96.js | 3 - .../dist/en-US/polyfills.374f1f989f34e1be.js | 1 + .../dist/en-US/polyfills.4b60b22744014b0b.js | 1 - .../dist/en-US/runtime.4bd595c16d7c473d.js | 1 - .../dist/en-US/runtime.a53144ca583f6e2c.js | 1 + .../dist/en-US/scripts.177a7ad3f45b4499.js | 1 + .../dist/en-US/scripts.cfd741a72b67f696.js | 1 - .../dist/en-US/styles.5f6140b407c420b8.css | 17 + .../dist/en-US/styles.84a45510313e718c.css | 17 - .../mgr/dashboard/frontend/jest.config.cjs | 77 +- .../mgr/dashboard/frontend/package-lock.json | 47333 +++++++++------- .../mgr/dashboard/frontend/package.json | 47 +- .../frontend/src/app/app-routing.module.ts | 50 +- .../iscsi-setting/iscsi-setting.component.ts | 4 +- .../iscsi-target-discovery-modal.component.ts | 10 +- .../iscsi-target-form.component.spec.ts | 2 +- .../iscsi-target-form.component.ts | 52 +- ...i-target-image-settings-modal.component.ts | 12 +- ...csi-target-iqn-settings-modal.component.ts | 8 +- .../bootstrap-create-modal.component.ts | 16 +- .../bootstrap-import-modal.component.ts | 20 +- 
.../image-list/image-list.component.html | 10 + .../image-list/image-list.component.ts | 9 +- .../mirroring/overview/overview.component.ts | 4 +- .../pool-edit-mode-modal.component.ts | 4 +- .../pool-edit-peer-modal.component.ts | 10 +- .../rbd-configuration-form.component.ts | 6 +- .../block/rbd-form/rbd-feature.interface.ts | 1 + .../block/rbd-form/rbd-form.component.html | 3 + .../block/rbd-form/rbd-form.component.spec.ts | 1 - .../ceph/block/rbd-form/rbd-form.component.ts | 41 +- .../block/rbd-list/rbd-list.component.html | 47 +- .../block/rbd-list/rbd-list.component.spec.ts | 54 - .../ceph/block/rbd-list/rbd-list.component.ts | 46 +- .../rbd-namespace-form-modal.component.ts | 6 +- .../rbd-snapshot-form-modal.component.spec.ts | 18 +- .../rbd-snapshot-form-modal.component.ts | 7 +- .../rbd-snapshot-actions.model.ts | 19 +- .../rbd-snapshot-list.component.spec.ts | 21 +- .../rbd-snapshot-list.component.ts | 2 +- .../cephfs-directories.component.ts | 5 + .../cephfs-form/cephfs-form.component.html | 105 + .../cephfs-form/cephfs-form.component.scss} | 0 .../cephfs-form/cephfs-form.component.spec.ts | 82 + .../cephfs-form/cephfs-form.component.ts | 197 + .../cephfs-list/cephfs-list.component.html | 8 + .../cephfs-list/cephfs-list.component.spec.ts | 66 +- .../cephfs-list/cephfs-list.component.ts | 110 +- .../cephfs-subvolume-form.component.html | 186 + .../cephfs-subvolume-form.component.scss | 0 .../cephfs-subvolume-form.component.spec.ts | 77 + .../cephfs-subvolume-form.component.ts | 216 + .../cephfs-subvolume-group.component.html | 54 + .../cephfs-subvolume-group.component.scss | 0 .../cephfs-subvolume-group.component.spec.ts | 28 + .../cephfs-subvolume-group.component.ts | 178 + .../cephfs-subvolume-list.component.html | 123 + .../cephfs-subvolume-list.component.scss | 0 .../cephfs-subvolume-list.component.spec.ts | 30 + .../cephfs-subvolume-list.component.ts | 241 + .../cephfs-subvolumegroup-form.component.html | 148 + .../cephfs-subvolumegroup-form.component.scss | 0 ...phfs-subvolumegroup-form.component.spec.ts | 38 + .../cephfs-subvolumegroup-form.component.ts | 198 + .../cephfs-tabs/cephfs-tabs.component.html | 17 + .../src/app/ceph/cephfs/cephfs.module.ts | 29 +- .../src/app/ceph/cluster/cluster.module.ts | 12 +- .../configuration-form.component.ts | 18 +- .../create-cluster-review.component.ts | 4 +- .../create-cluster.component.ts | 4 +- .../hosts/host-form/host-form.component.ts | 14 +- .../ceph/cluster/hosts/hosts.component.html | 3 + .../cluster/hosts/hosts.component.spec.ts | 35 +- .../app/ceph/cluster/hosts/hosts.component.ts | 18 +- .../app/ceph/cluster/logs/logs.component.html | 28 +- .../app/ceph/cluster/logs/logs.component.scss | 4 + .../app/ceph/cluster/logs/logs.component.ts | 19 +- .../osd-flags-indiv-modal.component.ts | 4 +- .../osd-flags-modal.component.ts | 4 +- .../osd/osd-form/osd-form.component.ts | 10 +- .../osd/osd-list/osd-list.component.ts | 4 +- .../osd-recv-speed-modal.component.ts | 8 +- .../osd-scrub-modal.component.ts | 6 +- .../rules-list/rules-list.component.ts | 2 +- .../silence-matcher-modal.component.ts | 4 +- .../service-form/service-form.component.html | 72 +- .../service-form.component.spec.ts | 29 +- .../service-form/service-form.component.ts | 210 +- .../cluster/services/services.component.ts | 4 +- .../upgrade-start-modal.component.html | 89 + .../upgrade-start-modal.component.scss | 0 .../upgrade-start-modal.component.spec.ts | 32 + .../upgrade-start-modal.component.ts | 99 + .../upgrade-progress.component.html | 89 + 
.../upgrade-progress.component.scss | 0 .../upgrade-progress.component.spec.ts | 29 + .../upgrade-progress.component.ts | 140 + .../cluster/upgrade/upgrade.component.html | 233 + .../cluster/upgrade/upgrade.component.scss | 0 .../cluster/upgrade/upgrade.component.spec.ts | 230 + .../ceph/cluster/upgrade/upgrade.component.ts | 145 + .../dashboard-v3/card/card.component.html | 8 - .../dashboard-v3/card/card.component.scss | 5 - .../ceph/dashboard-v3/card/card.component.ts | 11 - .../dashboard-area-chart.component.html | 38 +- .../dashboard-area-chart.component.scss | 22 +- .../dashboard-area-chart.component.ts | 251 +- .../dashboard-pie.component.html | 2 +- .../dashboard-pie/dashboard-pie.component.ts | 174 +- .../dashboard-time-selector.component.scss | 4 - .../dashboard-time-selector.component.ts | 22 +- .../ceph/dashboard-v3/dashboard-v3.module.ts | 6 +- .../dashboard/dashboard-v3.component.html | 447 +- .../dashboard/dashboard-v3.component.scss | 76 +- .../dashboard/dashboard-v3.component.spec.ts | 20 +- .../dashboard/dashboard-v3.component.ts | 118 +- .../health-pie/health-pie.component.ts | 120 +- .../ceph/dashboard/health/health.component.ts | 44 +- .../nfs-form-client.component.ts | 24 +- .../ceph/nfs/nfs-form/nfs-form.component.ts | 34 +- .../pool/pool-form/pool-form.component.html | 9 + .../pool/pool-form/pool-form.component.ts | 32 +- ...create-rgw-service-entities.component.html | 70 + ...create-rgw-service-entities.component.scss | 0 ...ate-rgw-service-entities.component.spec.ts | 37 + .../create-rgw-service-entities.component.ts | 99 + ...ultisite-zone-deletion-form.component.html | 54 + ...ultisite-zone-deletion-form.component.scss | 9 + ...isite-zone-deletion-form.component.spec.ts | 32 + ...-multisite-zone-deletion-form.component.ts | 99 + ...ite-zonegroup-deletion-form.component.html | 75 + ...ite-zonegroup-deletion-form.component.scss | 9 + ...-zonegroup-deletion-form.component.spec.ts | 32 + ...isite-zonegroup-deletion-form.component.ts | 106 + .../src/app/ceph/rgw/models/rgw-multisite.ts | 52 + .../rgw-bucket-details.component.html | 94 +- .../rgw-bucket-details.component.spec.ts | 1 + .../rgw-bucket-list.component.ts | 2 +- .../rgw-multisite-details.component.html | 121 + .../rgw-multisite-details.component.scss | 13 + .../rgw-multisite-details.component.spec.ts | 43 + .../rgw-multisite-details.component.ts | 592 + .../rgw-multisite-export.component.html | 65 + .../rgw-multisite-export.component.scss | 0 .../rgw-multisite-export.component.spec.ts | 37 + .../rgw-multisite-export.component.ts | 62 + .../rgw-multisite-import.component.html | 182 + .../rgw-multisite-import.component.scss | 0 .../rgw-multisite-import.component.spec.ts | 37 + .../rgw-multisite-import.component.ts | 164 + .../rgw-multisite-migrate.component.html | 154 + .../rgw-multisite-migrate.component.scss | 0 .../rgw-multisite-migrate.component.spec.ts | 37 + .../rgw-multisite-migrate.component.ts | 194 + .../rgw-multisite-realm-form.component.html | 58 + .../rgw-multisite-realm-form.component.scss | 0 ...rgw-multisite-realm-form.component.spec.ts | 94 + .../rgw-multisite-realm-form.component.ts | 131 + .../rgw-multisite-zone-form.component.html | 283 + .../rgw-multisite-zone-form.component.scss | 0 .../rgw-multisite-zone-form.component.spec.ts | 37 + .../rgw-multisite-zone-form.component.ts | 328 + ...gw-multisite-zonegroup-form.component.html | 205 + ...gw-multisite-zonegroup-form.component.scss | 0 ...multisite-zonegroup-form.component.spec.ts | 102 + .../rgw-multisite-zonegroup-form.component.ts | 
313 + .../rgw-overview-card-popover.scss | 20 + .../rgw-overview-dashboard.component.html | 185 + .../rgw-overview-dashboard.component.scss | 32 + .../rgw-overview-dashboard.component.spec.ts | 140 + .../rgw-overview-dashboard.component.ts | 166 + .../rgw-sync-data-info.component.html | 51 + .../rgw-sync-data-info.component.scss | 8 + .../rgw-sync-data-info.component.spec.ts | 25 + .../rgw-sync-data-info.component.ts | 16 + .../rgw-sync-metadata-info.component.html | 59 + .../rgw-sync-metadata-info.component.scss | 8 + .../rgw-sync-metadata-info.component.spec.ts | 25 + .../rgw-sync-metadata-info.component.ts | 16 + .../rgw-sync-primary-zone.component.html | 15 + .../rgw-sync-primary-zone.component.scss | 12 + .../rgw-sync-primary-zone.component.spec.ts | 23 + .../rgw-sync-primary-zone.component.ts | 22 + .../rgw-system-user.component.html | 37 + .../rgw-system-user.component.scss | 0 .../rgw-system-user.component.spec.ts | 37 + .../rgw-system-user.component.ts | 50 + .../rgw-user-form.component.html | 6 + .../rgw-user-list/rgw-user-list.component.ts | 2 +- .../rgw-user-tabs.component.spec.ts | 7 +- .../frontend/src/app/ceph/rgw/rgw.module.ts | 57 +- .../shared/feedback/feedback.component.ts | 12 +- .../login-password-form.component.scss | 5 + .../app/core/auth/login/login.component.html | 4 +- .../app/core/auth/login/login.component.scss | 2 +- .../auth/role-form/role-form.component.html | 58 +- .../role-form/role-form.component.spec.ts | 73 +- .../auth/role-form/role-form.component.ts | 155 +- .../user-password-form.component.scss | 6 + .../src/app/core/error/error.component.html | 6 +- .../src/app/core/error/error.component.ts | 6 + .../workbench-layout.component.html | 2 +- .../workbench-layout.component.scss | 4 + .../navigation/navigation.component.html | 16 + .../navigation/navigation.component.spec.ts | 7 +- .../cephfs-subvolume-group.service.spec.ts | 23 + .../api/cephfs-subvolume-group.service.ts | 79 + .../api/cephfs-subvolume.service.spec.ts | 43 + .../shared/api/cephfs-subvolume.service.ts | 96 + .../src/app/shared/api/cephfs.service.spec.ts | 16 + .../src/app/shared/api/cephfs.service.ts | 30 + .../src/app/shared/api/daemon.service.ts | 8 + .../src/app/shared/api/host.service.spec.ts | 13 +- .../src/app/shared/api/host.service.ts | 21 +- .../src/app/shared/api/prometheus.service.ts | 76 +- .../src/app/shared/api/rbd.service.spec.ts | 4 +- .../src/app/shared/api/rgw-bucket.service.ts | 6 + .../src/app/shared/api/rgw-daemon.service.ts | 11 + .../app/shared/api/rgw-multisite.service.ts | 32 + .../app/shared/api/rgw-realm.service.spec.ts | 22 + .../src/app/shared/api/rgw-realm.service.ts | 84 + .../app/shared/api/rgw-zone.service.spec.ts | 22 + .../src/app/shared/api/rgw-zone.service.ts | 168 + .../shared/api/rgw-zonegroup.service.spec.ts | 22 + .../app/shared/api/rgw-zonegroup.service.ts | 93 + .../app/shared/api/upgrade.service.spec.ts | 67 + .../src/app/shared/api/upgrade.service.ts | 78 + .../alert-panel/alert-panel.component.html | 3 +- .../alert-panel/alert-panel.component.ts | 2 + .../back-button/back-button.component.ts | 10 +- .../card-row/card-row.component.html | 58 +- .../card-row/card-row.component.scss | 4 + .../card-row/card-row.component.spec.ts | 7 +- .../card-row/card-row.component.ts | 0 .../components/card/card.component.html | 24 + .../components/card/card.component.scss | 0 .../components}/card/card.component.spec.ts | 0 .../shared/components/card/card.component.ts | 28 + .../cd-label/cd-label.component.html | 3 +- .../cd-label/cd-label.component.spec.ts | 7 
+- .../components/cd-label/cd-label.component.ts | 1 + .../shared/components/components.module.ts | 10 +- .../config-option/config-option.component.ts | 4 +- .../confirmation-modal.component.ts | 6 +- .../copy2clipboard-button.component.html | 22 +- .../copy2clipboard-button.component.ts | 3 + ...critical-confirmation-modal.component.html | 1 + .../critical-confirmation-modal.component.ts | 17 +- .../date-time-picker.component.ts | 4 +- .../form-button-panel.component.ts | 17 +- .../form-modal/form-modal.component.ts | 11 +- .../components/select/select.component.ts | 6 +- .../submit-button/submit-button.component.ts | 4 +- .../usage-bar/usage-bar.component.html | 24 +- .../usage-bar/usage-bar.component.scss | 2 +- .../usage-bar/usage-bar.component.ts | 8 + .../src/app/shared/constants/app.constants.ts | 25 + .../checked-table-form.component.html | 55 + .../checked-table-form.component.scss | 0 .../checked-table-form.component.spec.ts | 138 + .../checked-table-form.component.ts | 165 + .../app/shared/datatable/datatable.module.ts | 7 +- .../table-actions.component.html | 2 +- .../table-pagination.component.spec.ts | 7 +- .../datatable/table/table.component.html | 19 +- .../shared/datatable/table/table.component.ts | 8 +- .../directives/dimless-binary.directive.ts | 8 +- .../cd-form-control.directive.ts | 4 +- .../cd-form-validation.directive.ts | 15 +- .../directives/stateful-tab.directive.spec.ts | 10 + .../directives/stateful-tab.directive.ts | 5 +- .../src/app/shared/enum/cell-template.enum.ts | 7 +- .../app/shared/enum/dashboard-promqls.enum.ts | 22 +- .../src/app/shared/enum/health-icon.enum.ts | 6 + .../src/app/shared/enum/icons.enum.ts | 1 + .../src/app/shared/forms/cd-form-builder.ts | 6 +- .../src/app/shared/forms/cd-form-group.ts | 6 +- .../src/app/shared/forms/cd-validators.ts | 7 +- .../crud-form/crud-form.component.spec.ts | 6 +- .../forms/crud-form/crud-form.component.ts | 4 +- .../formly-array-type.component.spec.ts | 17 +- .../formly-file-type.component.spec.ts | 9 +- .../formly-input-type.component.spec.ts | 17 +- .../formly-input-wrapper.component.spec.ts | 17 +- .../formly-object-type.component.spec.ts | 17 +- .../formly-textarea-type.component.spec.ts | 18 +- .../models/cephfs-subvolume-group.model.ts | 13 + .../shared/models/cephfs-subvolume.model.ts | 18 + .../models/cephfs-subvolumegroup.model.ts | 13 + .../app/shared/models/service.interface.ts | 3 + .../app/shared/models/upgrade.interface.ts | 15 + .../pipes/dimless-binary-per-second.pipe.ts | 19 +- .../app/shared/pipes/dimless-binary.pipe.ts | 19 +- .../src/app/shared/pipes/dimless.pipe.ts | 9 +- .../octal-to-human-readable.pipe.spec.ts | 32 + .../pipes/octal-to-human-readable.pipe.ts | 96 + .../src/app/shared/pipes/path.pipe.spec.ts | 18 + .../src/app/shared/pipes/path.pipe.ts | 17 + .../src/app/shared/pipes/pipes.module.ts | 13 +- .../app/shared/pipes/relative-date.pipe.ts | 5 +- .../src/app/shared/services/doc.service.ts | 2 + .../app/shared/services/formatter.service.ts | 20 +- .../services/module-status-guard.service.ts | 3 + .../services/number-formatter.service.ts | 40 +- .../services/prometheus-alert.service.ts | 10 +- .../shared/services/task-message.service.ts | 39 + .../mgr/dashboard/frontend/src/setupJest.ts | 4 + .../mgr/dashboard/frontend/src/styles.scss | 9 +- .../styles/defaults/_bootstrap-defaults.scss | 3 + .../frontend/src/testing/unit-test-helper.ts | 14 +- .../mgr/dashboard/frontend/tsconfig.json | 18 +- ceph/src/pybind/mgr/dashboard/module.py | 14 +- ceph/src/pybind/mgr/dashboard/openapi.yaml 
| 10417 ++-- .../pybind/mgr/dashboard/plugins/ttl_cache.py | 117 +- .../mgr/dashboard/requirements-test.txt | 2 +- .../mgr/dashboard/run-frontend-e2e-tests.sh | 7 +- .../src/pybind/mgr/dashboard/services/auth.py | 10 + .../mgr/dashboard/services/ceph_service.py | 30 + .../pybind/mgr/dashboard/services/cluster.py | 44 +- .../mgr/dashboard/services/orchestrator.py | 38 + ceph/src/pybind/mgr/dashboard/services/rbd.py | 200 +- .../mgr/dashboard/services/rgw_client.py | 789 +- ceph/src/pybind/mgr/dashboard/settings.py | 2 + .../pybind/mgr/dashboard/tests/test_cache.py | 48 + .../dashboard/tests/test_cluster_upgrade.py | 61 + .../pybind/mgr/dashboard/tests/test_daemon.py | 5 + .../pybind/mgr/dashboard/tests/test_host.py | 77 +- .../mgr/dashboard/tests/test_prometheus.py | 49 +- .../pybind/mgr/dashboard/tests/test_rgw.py | 26 +- .../mgr/dashboard/tests/test_rgw_client.py | 2 + ceph/src/pybind/mgr/devicehealth/module.py | 39 +- ceph/src/pybind/mgr/influx/module.py | 2 +- ceph/src/pybind/mgr/mgr_module.py | 43 +- ceph/src/pybind/mgr/mgr_util.py | 4 +- ceph/src/pybind/mgr/nfs/cluster.py | 25 +- .../src/pybind/mgr/orchestrator/_interface.py | 58 +- ceph/src/pybind/mgr/orchestrator/module.py | 157 +- ceph/src/pybind/mgr/pg_autoscaler/module.py | 135 +- .../tests/test_overlapping_roots.py | 107 +- ceph/src/pybind/mgr/prometheus/module.py | 83 +- .../rbd_support/mirror_snapshot_schedule.py | 32 +- ceph/src/pybind/mgr/rbd_support/perf.py | 19 +- .../mgr/rbd_support/trash_purge_schedule.py | 5 +- ceph/src/pybind/mgr/restful/api/perf.py | 2 +- ceph/src/pybind/mgr/rgw/module.py | 66 +- ceph/src/pybind/mgr/rook/ci/Dockerfile | 3 + .../pybind/mgr/rook/ci/run-rook-e2e-tests.sh | 9 + .../rook/ci/scripts/bootstrap-rook-cluster.sh | 135 + .../mgr/rook/ci/tests/features/rook.feature | 12 + .../ci/tests/features/steps/implementation.py | 21 + .../mgr/rook/ci/tests/features/steps/utils.py | 29 + ceph/src/pybind/mgr/rook/module.py | 16 +- ceph/src/pybind/mgr/rook/rook_cluster.py | 86 +- ceph/src/pybind/mgr/rook/tests/fixtures.py | 11 + ceph/src/pybind/mgr/rook/tests/test_rook.py | 120 + .../mgr/snap_schedule/fs/schedule_client.py | 21 +- ceph/src/pybind/mgr/snap_schedule/module.py | 108 +- .../tests/fs/test_schedule_client.py | 4 +- ceph/src/pybind/mgr/telegraf/module.py | 2 +- ceph/src/pybind/mgr/telemetry/module.py | 22 +- ceph/src/pybind/mgr/tox.ini | 3 +- ceph/src/pybind/mgr/volumes/fs/async_job.py | 8 +- ceph/src/pybind/mgr/volumes/fs/fs_util.py | 21 +- .../mgr/volumes/fs/operations/volume.py | 14 +- ceph/src/pybind/mgr/volumes/fs/volume.py | 2 +- ceph/src/pybind/rbd/rbd.pyx | 19 +- .../ceph/deployment/drive_group.py | 15 +- .../deployment/drive_selection/selector.py | 9 +- .../ceph/deployment/service_spec.py | 312 +- .../ceph/tests/test_drive_group.py | 12 + .../ceph/tests/test_service_spec.py | 310 +- ceph/src/rgw/driver/rados/rgw_bucket.cc | 441 +- ceph/src/rgw/driver/rados/rgw_bucket.h | 17 +- ceph/src/rgw/driver/rados/rgw_cr_rados.cc | 2 +- ceph/src/rgw/driver/rados/rgw_notify.cc | 85 +- ceph/src/rgw/driver/rados/rgw_notify.h | 3 + ceph/src/rgw/driver/rados/rgw_obj_manifest.h | 4 + ceph/src/rgw/driver/rados/rgw_pubsub_push.cc | 12 +- ceph/src/rgw/driver/rados/rgw_rados.cc | 225 +- ceph/src/rgw/driver/rados/rgw_rados.h | 21 +- ceph/src/rgw/driver/rados/rgw_sal_rados.cc | 4 +- ceph/src/rgw/driver/rados/rgw_sal_rados.h | 2 +- ceph/src/rgw/rgw_admin.cc | 265 +- ceph/src/rgw/rgw_amqp.cc | 408 +- ceph/src/rgw/rgw_amqp.h | 30 +- ceph/src/rgw/rgw_auth_keystone.cc | 37 +- ceph/src/rgw/rgw_auth_s3.cc | 59 +- 
ceph/src/rgw/rgw_auth_s3.h | 3 + ceph/src/rgw/rgw_common.cc | 20 +- ceph/src/rgw/rgw_common.h | 4 + ceph/src/rgw/rgw_cors.h | 12 + ceph/src/rgw/rgw_crypt.cc | 42 +- ceph/src/rgw/rgw_crypt.h | 10 +- ceph/src/rgw/rgw_env.cc | 15 + ceph/src/rgw/rgw_formats.cc | 5 + ceph/src/rgw/rgw_formats.h | 1 + ceph/src/rgw/rgw_kafka.cc | 36 +- ceph/src/rgw/rgw_kms.cc | 16 +- ceph/src/rgw/rgw_object_lock.cc | 7 +- ceph/src/rgw/rgw_op.cc | 24 +- ceph/src/rgw/rgw_opa.cc | 5 +- ceph/src/rgw/rgw_pubsub.cc | 102 +- ceph/src/rgw/rgw_pubsub.h | 16 +- ceph/src/rgw/rgw_rest_pubsub.cc | 18 +- ceph/src/rgw/rgw_rest_s3.cc | 137 +- ceph/src/rgw/rgw_rest_swift.cc | 20 +- ceph/src/rgw/rgw_s3select.cc | 3 +- ceph/src/rgw/rgw_sal.h | 2 +- ceph/src/rgw/rgw_sal_daos.cc | 4 +- ceph/src/rgw/rgw_sal_daos.h | 2 +- ceph/src/rgw/rgw_sal_dbstore.cc | 4 +- ceph/src/rgw/rgw_sal_dbstore.h | 2 +- ceph/src/rgw/rgw_sal_filter.h | 4 +- ceph/src/rgw/rgw_sal_motr.cc | 4 +- ceph/src/rgw/rgw_sal_motr.h | 2 +- ceph/src/rgw/rgw_swift_auth.cc | 12 +- ceph/src/rgw/services/svc_notify.cc | 74 +- ceph/src/rgw/services/svc_notify.h | 2 +- ceph/src/rgw/services/svc_zone.cc | 15 + ceph/src/rgw/services/svc_zone.h | 1 + .../TPCDS/ddl/create_tpcds_tables.sql | 651 + .../TPCDS/sample-queries-tpcds/README.md | 4 + .../TPCDS/sample-queries-tpcds/query1.sql | 25 + .../TPCDS/sample-queries-tpcds/query10.sql | 59 + .../TPCDS/sample-queries-tpcds/query11.sql | 81 + .../TPCDS/sample-queries-tpcds/query12.sql | 34 + .../TPCDS/sample-queries-tpcds/query13.sql | 52 + .../TPCDS/sample-queries-tpcds/query14.sql | 210 + .../TPCDS/sample-queries-tpcds/query15.sql | 20 + .../TPCDS/sample-queries-tpcds/query16.sql | 31 + .../TPCDS/sample-queries-tpcds/query17.sql | 45 + .../TPCDS/sample-queries-tpcds/query18.sql | 34 + .../TPCDS/sample-queries-tpcds/query19.sql | 25 + .../TPCDS/sample-queries-tpcds/query2.sql | 60 + .../TPCDS/sample-queries-tpcds/query20.sql | 30 + .../TPCDS/sample-queries-tpcds/query21.sql | 30 + .../TPCDS/sample-queries-tpcds/query22.sql | 20 + .../TPCDS/sample-queries-tpcds/query23.sql | 107 + .../TPCDS/sample-queries-tpcds/query24.sql | 107 + .../TPCDS/sample-queries-tpcds/query25.sql | 48 + .../TPCDS/sample-queries-tpcds/query26.sql | 21 + .../TPCDS/sample-queries-tpcds/query27.sql | 23 + .../TPCDS/sample-queries-tpcds/query28.sql | 53 + .../TPCDS/sample-queries-tpcds/query29.sql | 47 + .../TPCDS/sample-queries-tpcds/query3.sql | 21 + .../TPCDS/sample-queries-tpcds/query30.sql | 31 + .../TPCDS/sample-queries-tpcds/query31.sql | 52 + .../TPCDS/sample-queries-tpcds/query32.sql | 28 + .../TPCDS/sample-queries-tpcds/query33.sql | 75 + .../TPCDS/sample-queries-tpcds/query34.sql | 31 + .../TPCDS/sample-queries-tpcds/query35.sql | 58 + .../TPCDS/sample-queries-tpcds/query36.sql | 30 + .../TPCDS/sample-queries-tpcds/query37.sql | 17 + .../TPCDS/sample-queries-tpcds/query38.sql | 23 + .../TPCDS/sample-queries-tpcds/query39.sql | 54 + .../TPCDS/sample-queries-tpcds/query4.sql | 116 + .../TPCDS/sample-queries-tpcds/query40.sql | 28 + .../TPCDS/sample-queries-tpcds/query41.sql | 52 + .../TPCDS/sample-queries-tpcds/query42.sql | 22 + .../TPCDS/sample-queries-tpcds/query43.sql | 19 + .../TPCDS/sample-queries-tpcds/query44.sql | 35 + .../TPCDS/sample-queries-tpcds/query45.sql | 20 + .../TPCDS/sample-queries-tpcds/query46.sql | 35 + .../TPCDS/sample-queries-tpcds/query47.sql | 51 + .../TPCDS/sample-queries-tpcds/query48.sql | 67 + .../TPCDS/sample-queries-tpcds/query49.sql | 129 + .../TPCDS/sample-queries-tpcds/query5.sql | 128 + 
.../TPCDS/sample-queries-tpcds/query50.sql | 59 + .../TPCDS/sample-queries-tpcds/query51.sql | 45 + .../TPCDS/sample-queries-tpcds/query52.sql | 22 + .../TPCDS/sample-queries-tpcds/query53.sql | 28 + .../TPCDS/sample-queries-tpcds/query54.sql | 56 + .../TPCDS/sample-queries-tpcds/query55.sql | 14 + .../TPCDS/sample-queries-tpcds/query56.sql | 69 + .../TPCDS/sample-queries-tpcds/query57.sql | 48 + .../TPCDS/sample-queries-tpcds/query58.sql | 65 + .../TPCDS/sample-queries-tpcds/query59.sql | 44 + .../TPCDS/sample-queries-tpcds/query6.sql | 26 + .../TPCDS/sample-queries-tpcds/query60.sql | 78 + .../TPCDS/sample-queries-tpcds/query61.sql | 44 + .../TPCDS/sample-queries-tpcds/query62.sql | 35 + .../TPCDS/sample-queries-tpcds/query63.sql | 29 + .../TPCDS/sample-queries-tpcds/query64.sql | 121 + .../TPCDS/sample-queries-tpcds/query65.sql | 29 + .../TPCDS/sample-queries-tpcds/query66.sql | 220 + .../TPCDS/sample-queries-tpcds/query67.sql | 44 + .../TPCDS/sample-queries-tpcds/query68.sql | 42 + .../TPCDS/sample-queries-tpcds/query69.sql | 47 + .../TPCDS/sample-queries-tpcds/query7.sql | 21 + .../TPCDS/sample-queries-tpcds/query70.sql | 38 + .../TPCDS/sample-queries-tpcds/query71.sql | 40 + .../TPCDS/sample-queries-tpcds/query72.sql | 29 + .../TPCDS/sample-queries-tpcds/query73.sql | 28 + .../TPCDS/sample-queries-tpcds/query74.sql | 61 + .../TPCDS/sample-queries-tpcds/query75.sql | 70 + .../TPCDS/sample-queries-tpcds/query76.sql | 24 + .../TPCDS/sample-queries-tpcds/query77.sql | 108 + .../TPCDS/sample-queries-tpcds/query78.sql | 58 + .../TPCDS/sample-queries-tpcds/query79.sql | 23 + .../TPCDS/sample-queries-tpcds/query8.sql | 108 + .../TPCDS/sample-queries-tpcds/query80.sql | 96 + .../TPCDS/sample-queries-tpcds/query81.sql | 31 + .../TPCDS/sample-queries-tpcds/query82.sql | 17 + .../TPCDS/sample-queries-tpcds/query83.sql | 67 + .../TPCDS/sample-queries-tpcds/query84.sql | 21 + .../TPCDS/sample-queries-tpcds/query85.sql | 84 + .../TPCDS/sample-queries-tpcds/query86.sql | 26 + .../TPCDS/sample-queries-tpcds/query87.sql | 23 + .../TPCDS/sample-queries-tpcds/query88.sql | 94 + .../TPCDS/sample-queries-tpcds/query89.sql | 28 + .../TPCDS/sample-queries-tpcds/query9.sql | 51 + .../TPCDS/sample-queries-tpcds/query90.sql | 22 + .../TPCDS/sample-queries-tpcds/query91.sql | 31 + .../TPCDS/sample-queries-tpcds/query92.sql | 30 + .../TPCDS/sample-queries-tpcds/query93.sql | 18 + .../TPCDS/sample-queries-tpcds/query94.sql | 29 + .../TPCDS/sample-queries-tpcds/query95.sql | 32 + .../TPCDS/sample-queries-tpcds/query96.sql | 16 + .../TPCDS/sample-queries-tpcds/query97.sql | 25 + .../TPCDS/sample-queries-tpcds/query98.sql | 33 + .../TPCDS/sample-queries-tpcds/query99.sql | 35 + ceph/src/s3select/TPCDS/tpcds_functions.bash | 40 + .../s3select/container/trino/hms_trino.yaml | 31 + .../container/trino/run_trino_on_ceph.bash | 86 + .../trino/trino/catalog/hive.properties | 33 + .../container/trino/trino/config.properties | 5 + .../s3select/container/trino/trino/jvm.config | 19 + .../container/trino/trino/log.properties | 2 + .../container/trino/trino/node.properties | 2 + .../src/s3select/example/s3select_example.cpp | 17 +- ceph/src/s3select/include/s3select.h | 200 +- .../src/s3select/include/s3select_functions.h | 184 +- .../s3select/include/s3select_json_parser.h | 105 +- ceph/src/s3select/include/s3select_oper.h | 166 +- ceph/src/s3select/test/s3select_test.cpp | 120 +- ceph/src/test/cli/radosgw-admin/help.t | 15 +- ceph/src/test/crimson/CMakeLists.txt | 7 +- ceph/src/test/crimson/seastore/CMakeLists.txt | 6 +- 
.../seastore/nvmedevice/test_nvmedevice.cc | 11 +- .../onode_tree/test_fltree_onode_manager.cc | 19 +- .../seastore/onode_tree/test_staged_fltree.cc | 13 +- .../seastore/test_btree_lba_manager.cc | 10 +- .../test/crimson/seastore/test_cbjournal.cc | 106 +- .../seastore/test_collection_manager.cc | 15 +- .../seastore/test_object_data_handler.cc | 42 +- .../crimson/seastore/test_omap_manager.cc | 29 +- .../test/crimson/seastore/test_seastore.cc | 125 +- .../seastore/test_transaction_manager.cc | 657 +- .../seastore/transaction_manager_test_state.h | 31 +- ceph/src/test/crimson/test_alien_echo.cc | 4 +- ceph/src/test/crimson/test_messenger.cc | 1059 +- .../src/test/crimson/test_messenger_thrash.cc | 30 +- ceph/src/test/crimson/test_monc.cc | 2 +- ceph/src/test/crimson/test_socket.cc | 371 +- ceph/src/test/exporter/CMakeLists.txt | 1 + ceph/src/test/exporter/test_exporter.cc | 30 + ceph/src/test/fio/fio_ceph_messenger.cc | 2 +- ceph/src/test/libcephfs/CMakeLists.txt | 1 + ceph/src/test/libcephfs/multiclient.cc | 82 +- ceph/src/test/libcephfs/snapdiff.cc | 1684 + ceph/src/test/librados/misc.cc | 42 +- ceph/src/test/librbd/fsx.cc | 26 +- .../test_mock_CreatePrimaryRequest.cc | 366 +- .../test_mock_SnapshotRemoveRequest.cc | 56 +- ceph/src/test/mds/TestMDSAuthCaps.cc | 32 +- ceph/src/test/msgr/perf_msgr_client.cc | 2 +- ceph/src/test/msgr/perf_msgr_server.cc | 2 +- ceph/src/test/msgr/test_msgr.cc | 6 +- ceph/src/test/objectstore/Allocator_test.cc | 3 +- .../objectstore/fastbmap_allocator_test.cc | 181 +- ceph/src/test/objectstore/store_test.cc | 6 +- ceph/src/test/osd/TestOSDMap.cc | 4 +- ceph/src/test/pybind/test_cephfs.py | 67 +- .../test_mock_BootstrapRequest.cc | 53 + .../rbd_mirror/test_mock_ImageReplayer.cc | 39 + .../test/rgw/bucket_notification/test_bn.py | 90 +- ceph/src/test/rgw/test_rgw_amqp.cc | 370 +- ceph/src/test/rgw/test_rgw_crypto.cc | 60 +- ceph/src/tools/cephfs/JournalTool.cc | 2 +- ceph/src/tools/cephfs_mirror/FSMirror.h | 24 + .../tools/cephfs_mirror/InstanceWatcher.cc | 3 + .../src/tools/cephfs_mirror/InstanceWatcher.h | 13 + ceph/src/tools/cephfs_mirror/Mirror.cc | 47 +- ceph/src/tools/cephfs_mirror/Mirror.h | 19 +- ceph/src/tools/cephfs_mirror/MirrorWatcher.cc | 3 + ceph/src/tools/cephfs_mirror/MirrorWatcher.h | 13 + ceph/src/tools/cephfs_mirror/PeerReplayer.cc | 30 +- ceph/src/tools/cephfs_mirror/PeerReplayer.h | 1 + ceph/src/tools/rados/rados.cc | 2 +- ceph/src/tools/rbd_mirror/ImageReplayer.cc | 20 +- .../image_replayer/snapshot/Replayer.cc | 5 + ceph/src/tools/rbd_nbd/rbd-nbd.cc | 112 +- ceph/src/vstart.sh | 14 + 1362 files changed, 84852 insertions(+), 39656 deletions(-) delete mode 100644 ceph/debian/ceph-base.docs create mode 100644 ceph/doc/dev/balancer-design.rst delete mode 100644 ceph/doc/dev/cache-pool.rst delete mode 100644 ceph/doc/dev/prim-balancer-design.rst create mode 100644 ceph/doc/man/8/ceph-monstore-tool.rst create mode 100644 ceph/doc/monitoring/index.rst create mode 100644 ceph/doc/rados/operations/read-balancer.rst create mode 100644 ceph/doc/releases/reef.rst create mode 120000 ceph/qa/distros/crimson-supported-all-distro/centos_8.yaml create mode 120000 ceph/qa/distros/crimson-supported-all-distro/centos_latest.yaml create mode 120000 ceph/qa/distros/supported-all-distro/centos_latest.yaml create mode 120000 ceph/qa/distros/supported-all-distro/ubuntu_20.04.yaml rename ceph/qa/{suites/rbd/basic/cachepool/none.yaml => rbd/conf/+} (100%) create mode 100644 ceph/qa/rbd/conf/disable-pool-app.yaml rename 
ceph/qa/{suites/rbd/encryption/pool/ec-data-pool.yaml => rbd/data-pool/ec.yaml} (100%) rename ceph/qa/{suites/rbd/cli/pool => rbd/data-pool}/none.yaml (100%) rename ceph/qa/{suites/rbd/cli/pool/replicated-data-pool.yaml => rbd/data-pool/replicated.yaml} (100%) create mode 100755 ceph/qa/standalone/mon-stretch/mon-stretch-uneven-crush-weights.sh create mode 120000 ceph/qa/suites/crimson-rados/basic/centos_8.stream.yaml delete mode 120000 ceph/qa/suites/crimson-rados/basic/centos_latest.yaml create mode 120000 ceph/qa/suites/crimson-rados/basic/crimson-supported-all-distro create mode 120000 ceph/qa/suites/crimson-rados/rbd/centos_8.stream.yaml delete mode 120000 ceph/qa/suites/crimson-rados/rbd/centos_latest.yaml create mode 120000 ceph/qa/suites/crimson-rados/rbd/crimson-supported-all-distro rename ceph/qa/suites/{rbd/cli_v1/pool/none.yaml => crimson-rados/singleton/%} (100%) create mode 120000 ceph/qa/suites/crimson-rados/singleton/.qa create mode 120000 ceph/qa/suites/crimson-rados/singleton/all/.qa create mode 100644 ceph/qa/suites/crimson-rados/singleton/all/osd-backfill.yaml create mode 120000 ceph/qa/suites/crimson-rados/singleton/crimson-supported-all-distro create mode 120000 ceph/qa/suites/crimson-rados/singleton/crimson_qa_overrides.yaml create mode 120000 ceph/qa/suites/crimson-rados/singleton/objectstore create mode 120000 ceph/qa/suites/crimson-rados/singleton/rados.yaml create mode 120000 ceph/qa/suites/crimson-rados/thrash/centos_8.stream.yaml delete mode 120000 ceph/qa/suites/crimson-rados/thrash/centos_latest.yaml create mode 120000 ceph/qa/suites/crimson-rados/thrash/crimson-supported-all-distro rename ceph/qa/suites/{rbd/encryption/pool/none.yaml => fs/mirror-ha/cephfs-mirror/+} (100%) create mode 100644 ceph/qa/suites/fs/mirror-ha/cephfs-mirror/1-volume-create-rm.yaml rename ceph/qa/suites/fs/mirror-ha/cephfs-mirror/{three-per-cluster.yaml => 2-three-per-cluster.yaml} (100%) rename ceph/qa/suites/{rbd/librbd/config/none.yaml => fs/nfs/%} (100%) rename ceph/qa/suites/{krbd/singleton/msgr-failures => fs/nfs}/.qa (100%) rename ceph/qa/suites/{rbd/librbd/pool/none.yaml => fs/nfs/cluster/+} (100%) rename ceph/qa/suites/{rbd/basic/cachepool => fs/nfs/cluster}/.qa (100%) rename ceph/qa/suites/{orch/cephadm/workunits/task/test_nfs.yaml => fs/nfs/cluster/1-node.yaml} (65%) rename ceph/qa/suites/{rbd/cli/pool => fs/nfs/overrides}/.qa (100%) create mode 100644 ceph/qa/suites/fs/nfs/overrides/ignorelist_health.yaml rename ceph/qa/suites/{rgw/crypt/supported-random-distro$ => fs/nfs/supported-random-distros$} (100%) rename ceph/qa/suites/{rbd/cli_v1/pool => fs/nfs/tasks}/.qa (100%) create mode 100644 ceph/qa/suites/fs/nfs/tasks/nfs.yaml create mode 100644 ceph/qa/suites/fs/thrash/multifs/overrides/client-shutdown.yaml create mode 100644 ceph/qa/suites/fs/thrash/workloads/overrides/client-shutdown.yaml rename ceph/qa/suites/{rbd/migration/5-pool/none.yaml => krbd/singleton-msgr-failures/%} (100%) rename ceph/qa/suites/{rbd/encryption/pool => krbd/singleton-msgr-failures}/.qa (100%) create mode 120000 ceph/qa/suites/krbd/singleton-msgr-failures/bluestore-bitmap.yaml create mode 100644 ceph/qa/suites/krbd/singleton-msgr-failures/conf.yaml rename ceph/qa/suites/{rbd/librbd/config => krbd/singleton-msgr-failures/ms_mode$}/.qa (100%) create mode 100644 ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc-rxbounce.yaml create mode 100644 ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc.yaml create mode 100644 
ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy-rxbounce.yaml create mode 100644 ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy.yaml create mode 100644 ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/secure.yaml rename ceph/qa/suites/{rbd/librbd/pool => krbd/singleton-msgr-failures/msgr-failures}/.qa (100%) rename ceph/qa/suites/krbd/{singleton => singleton-msgr-failures}/msgr-failures/few.yaml (100%) rename ceph/qa/suites/krbd/{singleton => singleton-msgr-failures}/msgr-failures/many.yaml (100%) rename ceph/qa/suites/{rbd/migration/5-pool => krbd/singleton-msgr-failures/tasks}/.qa (100%) rename ceph/qa/suites/krbd/{singleton => singleton-msgr-failures}/tasks/rbd_xfstests.yaml (100%) create mode 100644 ceph/qa/suites/krbd/singleton/tasks/krbd_watch_errors.yaml create mode 120000 ceph/qa/suites/orch/cephadm/nfs create mode 100644 ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml create mode 100644 ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml create mode 100644 ceph/qa/suites/orch/cephadm/workunits/task/test_ca_signed_key.yaml delete mode 100644 ceph/qa/suites/rbd/basic/cachepool/small.yaml create mode 120000 ceph/qa/suites/rbd/basic/conf create mode 120000 ceph/qa/suites/rbd/cli/conf create mode 120000 ceph/qa/suites/rbd/cli/data-pool delete mode 100644 ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml create mode 100644 ceph/qa/suites/rbd/cli/workloads/rbd_support_module_recovery.yaml create mode 120000 ceph/qa/suites/rbd/cli_v1/conf delete mode 100644 ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml create mode 120000 ceph/qa/suites/rbd/encryption/conf create mode 120000 ceph/qa/suites/rbd/encryption/data-pool delete mode 100644 ceph/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml delete mode 100644 ceph/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/encryption/pool/small-cache-pool.yaml create mode 120000 ceph/qa/suites/rbd/immutable-object-cache/conf create mode 120000 ceph/qa/suites/rbd/iscsi/conf create mode 120000 ceph/qa/suites/rbd/librbd/conf create mode 120000 ceph/qa/suites/rbd/librbd/data-pool rename ceph/qa/suites/rbd/{qemu/pool => librbd/extra-conf}/.qa (100%) rename ceph/qa/suites/rbd/librbd/{config => extra-conf}/copy-on-read.yaml (100%) rename ceph/qa/suites/rbd/{qemu/pool => librbd/extra-conf}/none.yaml (100%) rename ceph/qa/suites/rbd/librbd/{config => extra-conf}/permit-partial-discard.yaml (100%) delete mode 100644 ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml create mode 120000 ceph/qa/suites/rbd/maintenance/conf create mode 120000 ceph/qa/suites/rbd/migration/5-data-pool delete mode 100644 ceph/qa/suites/rbd/migration/5-pool/ec-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/migration/5-pool/replicated-data-pool.yaml create mode 120000 ceph/qa/suites/rbd/migration/conf create mode 120000 ceph/qa/suites/rbd/mirror-thrash/conf create mode 120000 ceph/qa/suites/rbd/mirror/conf create mode 120000 ceph/qa/suites/rbd/nbd/conf create mode 120000 ceph/qa/suites/rbd/pwl-cache/home/conf create mode 120000 ceph/qa/suites/rbd/pwl-cache/tmpfs/conf create mode 120000 ceph/qa/suites/rbd/qemu/conf create mode 120000 ceph/qa/suites/rbd/qemu/data-pool delete mode 100644 ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml delete mode 100644 
ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml delete mode 100644 ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml create mode 120000 ceph/qa/suites/rbd/singleton-bluestore/conf create mode 120000 ceph/qa/suites/rbd/singleton/conf create mode 120000 ceph/qa/suites/rbd/thrash/conf delete mode 100644 ceph/qa/suites/rbd/thrash/thrashers/cache.yaml create mode 120000 ceph/qa/suites/rbd/valgrind/conf create mode 120000 ceph/qa/suites/rgw/cloud-transition/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/crypt/ubuntu_latest.yaml create mode 120000 ceph/qa/suites/rgw/dbstore/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/hadoop-s3a/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/lifecycle/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/notifications/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/service-token/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/tempest/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/thrash/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/tools/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/rgw/upgrade/ignore-pg-availability.yaml create mode 100644 ceph/qa/suites/rgw/verify/tasks/bucket-check.yaml create mode 100644 ceph/qa/suites/rgw/verify/tasks/mp_reupload.yaml create mode 120000 ceph/qa/suites/rgw/website/ignore-pg-availability.yaml create mode 120000 ceph/qa/suites/smoke/basic/supported-all-distro delete mode 120000 ceph/qa/suites/smoke/basic/supported-random-distro$ create mode 100755 ceph/qa/workunits/rbd/krbd_watch_errors.sh create mode 100755 ceph/qa/workunits/rbd/rbd_support_module_recovery.sh create mode 100755 ceph/qa/workunits/rgw/run-bucket-check.sh create mode 100755 ceph/qa/workunits/rgw/test_rgw_bucket_check.py create mode 100755 ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.py create mode 100755 ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh create mode 100644 ceph/src/crimson/os/seastore/record_scanner.cc create mode 100644 ceph/src/crimson/os/seastore/record_scanner.h rename ceph/src/crimson/os/seastore/segment_manager/{zns.cc => zbd.cc} (71%) rename ceph/src/crimson/os/seastore/segment_manager/{zns.h => zbd.h} (84%) create mode 100644 ceph/src/crimson/osd/lsan_suppressions.cc create mode 100644 ceph/src/osd/object_state_fmt.h create mode 100644 ceph/src/pybind/mgr/cephadm/exchange.py create mode 100644 ceph/src/pybind/mgr/cephadm/services/nvmeof.py create mode 100644 ceph/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/01-global.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/forms-helper.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/global.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/table-helper.feature.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.feature delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.po.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolume-groups.e2e-spec.feature create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolumes.e2e-spec.feature create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/119.066087561586659c.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/25.9d84971ea743706b.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/43.cf51dac96ed4b14e.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/543.eec5c8f9f29060da.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/803.08339784f3bb5d16.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/95.1ae8f43a396d3fea.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.871e04c0fd27227d.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/main.8be028f171baab96.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/polyfills.374f1f989f34e1be.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/polyfills.4b60b22744014b0b.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.4bd595c16d7c473d.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/runtime.a53144ca583f6e2c.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/scripts.177a7ad3f45b4499.js delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/scripts.cfd741a72b67f696.js create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.5f6140b407c420b8.css delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/styles.84a45510313e718c.css create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.html rename ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/{dashboard-v3/card-row/card-row.component.scss => cephfs/cephfs-form/cephfs-form.component.scss} (100%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-form/cephfs-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-form/cephfs-subvolume-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-group/cephfs-subvolume-group.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.scss create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolume-list/cephfs-subvolume-list.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-subvolumegroup-form/cephfs-subvolumegroup-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-form/upgrade-start-modal.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade-progress/upgrade-progress.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/upgrade/upgrade.component.ts delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/card/card.component.html delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/card/card.component.scss delete mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/dashboard-v3/card/card.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/create-rgw-service-entities/create-rgw-service-entities.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.html create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zone-deletion-form/rgw-multisite-zone-deletion-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite-zonegroup-deletion-form/rgw-multisite-zonegroup-deletion-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/models/rgw-multisite.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-details/rgw-multisite-details.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-export/rgw-multisite-export.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-import/rgw-multisite-import.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-migrate/rgw-multisite-migrate.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.html create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-realm-form/rgw-multisite-realm-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zone-form/rgw-multisite-zone-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-multisite-zonegroup-form/rgw-multisite-zonegroup-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-card-popover.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-overview-dashboard/rgw-overview-dashboard.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-data-info/rgw-sync-data-info.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-metadata-info/rgw-sync-metadata-info.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.scss create 
mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-sync-primary-zone/rgw-sync-primary-zone.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/ceph/rgw/rgw-system-user/rgw-system-user.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume-group.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume-group.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-subvolume.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-multisite.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-realm.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-realm.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zone.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zone.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zonegroup.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/rgw-zonegroup.service.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/upgrade.service.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/api/upgrade.service.ts rename ceph/src/pybind/mgr/dashboard/frontend/src/app/{ceph/dashboard-v3 => shared/components}/card-row/card-row.component.html (75%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/card-row/card-row.component.scss rename ceph/src/pybind/mgr/dashboard/frontend/src/app/{ceph/dashboard-v3 => shared/components}/card-row/card-row.component.spec.ts (77%) rename ceph/src/pybind/mgr/dashboard/frontend/src/app/{ceph/dashboard-v3 => shared/components}/card-row/card-row.component.ts (100%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.scss rename ceph/src/pybind/mgr/dashboard/frontend/src/app/{ceph/dashboard-v3 => shared/components}/card/card.component.spec.ts (100%) create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/components/card/card.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.html create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.scss create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.spec.ts create mode 100644 
ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/datatable/checked-table-form/checked-table-form.component.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-subvolume-group.model.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-subvolume.model.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/cephfs-subvolumegroup.model.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/models/upgrade.interface.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/octal-to-human-readable.pipe.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/octal-to-human-readable.pipe.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/path.pipe.spec.ts create mode 100644 ceph/src/pybind/mgr/dashboard/frontend/src/app/shared/pipes/path.pipe.ts create mode 100644 ceph/src/pybind/mgr/dashboard/tests/test_cache.py create mode 100644 ceph/src/pybind/mgr/dashboard/tests/test_cluster_upgrade.py create mode 100644 ceph/src/pybind/mgr/rook/ci/Dockerfile create mode 100755 ceph/src/pybind/mgr/rook/ci/run-rook-e2e-tests.sh create mode 100755 ceph/src/pybind/mgr/rook/ci/scripts/bootstrap-rook-cluster.sh create mode 100644 ceph/src/pybind/mgr/rook/ci/tests/features/rook.feature create mode 100644 ceph/src/pybind/mgr/rook/ci/tests/features/steps/implementation.py create mode 100644 ceph/src/pybind/mgr/rook/ci/tests/features/steps/utils.py create mode 100644 ceph/src/pybind/mgr/rook/tests/fixtures.py create mode 100644 ceph/src/pybind/mgr/rook/tests/test_rook.py create mode 100644 ceph/src/s3select/TPCDS/ddl/create_tpcds_tables.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/README.md create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query1.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query10.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query11.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query12.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query13.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query14.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query15.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query16.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query17.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query18.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query19.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query2.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query20.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query21.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query22.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query23.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query24.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query25.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query26.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query27.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query28.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query29.sql create mode 100644 
ceph/src/s3select/TPCDS/sample-queries-tpcds/query3.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query30.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query31.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query32.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query33.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query34.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query35.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query36.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query37.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query38.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query39.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query4.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query40.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query41.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query42.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query43.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query44.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query45.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query46.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query47.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query48.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query49.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query5.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query50.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query51.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query52.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query53.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query54.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query55.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query56.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query57.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query58.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query59.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query6.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query60.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query61.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query62.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query63.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query64.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query65.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query66.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query67.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query68.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query69.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query7.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query70.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query71.sql create 
mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query72.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query73.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query74.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query75.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query76.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query77.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query78.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query79.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query8.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query80.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query81.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query82.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query83.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query84.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query85.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query86.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query87.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query88.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query89.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query9.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query90.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query91.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query92.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query93.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query94.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query95.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query96.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query97.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query98.sql create mode 100644 ceph/src/s3select/TPCDS/sample-queries-tpcds/query99.sql create mode 100644 ceph/src/s3select/TPCDS/tpcds_functions.bash create mode 100644 ceph/src/s3select/container/trino/hms_trino.yaml create mode 100644 ceph/src/s3select/container/trino/run_trino_on_ceph.bash create mode 100644 ceph/src/s3select/container/trino/trino/catalog/hive.properties create mode 100644 ceph/src/s3select/container/trino/trino/config.properties create mode 100644 ceph/src/s3select/container/trino/trino/jvm.config create mode 100644 ceph/src/s3select/container/trino/trino/log.properties create mode 100644 ceph/src/s3select/container/trino/trino/node.properties create mode 100644 ceph/src/test/libcephfs/snapdiff.cc diff --git a/ceph/.github/pull_request_template.md b/ceph/.github/pull_request_template.md index 834ef7428..494a3f23e 100644 --- a/ceph/.github/pull_request_template.md +++ b/ceph/.github/pull_request_template.md @@ -22,7 +22,9 @@ ## Contribution Guidelines - To sign and title your commits, please refer to [Submitting Patches to Ceph](https://github.com/ceph/ceph/blob/main/SubmittingPatches.rst). -- If you are submitting a fix for a stable branch (e.g. "pacific"), please refer to [Submitting Patches to Ceph - Backports](https://github.com/ceph/ceph/blob/master/SubmittingPatches-backports.rst) for the proper workflow. 
+- If you are submitting a fix for a stable branch (e.g. "quincy"), please refer to [Submitting Patches to Ceph - Backports](https://github.com/ceph/ceph/blob/master/SubmittingPatches-backports.rst) for the proper workflow. + +- When filling out the below checklist, you may click boxes directly in the GitHub web UI. When entering or editing the entire PR message in the GitHub web UI editor, you may also select a checklist item by adding an `x` between the brackets: `[x]`. Spaces and capitalization matter when checking off items this way. ## Checklist - Tracker (select at least one) @@ -62,4 +64,5 @@ - `jenkins test ceph-volume all` - `jenkins test ceph-volume tox` - `jenkins test windows` +- `jenkins test rook e2e` diff --git a/ceph/CMakeLists.txt b/ceph/CMakeLists.txt index 91f4923c5..47a239d2b 100644 --- a/ceph/CMakeLists.txt +++ b/ceph/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.16) project(ceph - VERSION 18.2.0 + VERSION 18.2.1 LANGUAGES CXX C ASM) cmake_policy(SET CMP0028 NEW) diff --git a/ceph/PendingReleaseNotes b/ceph/PendingReleaseNotes index 5113444f9..03520c97b 100644 --- a/ceph/PendingReleaseNotes +++ b/ceph/PendingReleaseNotes @@ -1,3 +1,53 @@ +>=19.0.0 + +* RGW: S3 multipart uploads using Server-Side Encryption now replicate correctly in + multi-site. Previously, the replicas of such objects were corrupted on decryption. + A new tool, ``radosgw-admin bucket resync encrypted multipart``, can be used to + identify these original multipart uploads. The ``LastModified`` timestamp of any + identified object is incremented by 1ns to cause peer zones to replicate it again. + For multi-site deployments that make any use of Server-Side Encryption, we + recommend running this command against every bucket in every zone after all + zones have upgraded. +* CEPHFS: The MDS evicts clients that are not advancing their request tids, which causes + a large buildup of session metadata, resulting in the MDS going read-only due to + the RADOS operation exceeding the size threshold. The `mds_session_metadata_threshold` + config controls the maximum size to which the (encoded) session metadata can grow. +* RGW: New tools have been added to radosgw-admin for identifying and + correcting issues with versioned bucket indexes. Historical bugs with the + versioned bucket index transaction workflow made it possible for the index + to accumulate extraneous "book-keeping" olh entries and plain placeholder + entries. In some specific scenarios where clients made concurrent requests + referencing the same object key, it was likely that a lot of extra index + entries would accumulate. When a significant number of these entries are + present in a single bucket index shard, they can cause high bucket listing + latencies and lifecycle processing failures. To check whether a versioned + bucket has unnecessary olh entries, users can now run ``radosgw-admin + bucket check olh``. If the ``--fix`` flag is used, the extra entries will + be safely removed. Distinct from the issue described thus far, it is + also possible that some versioned buckets are maintaining extra unlinked + objects that are not listable from the S3/Swift APIs. These extra objects + are typically a result of PUT requests that exited abnormally, in the middle + of a bucket index transaction - so the client would not have received a + successful response. Bugs in prior releases made these unlinked objects easy + to reproduce with any PUT request that was made on a bucket that was actively + resharding.
Besides the extra space that these hidden, unlinked objects + consume, there can be another side effect in certain scenarios, caused by + the nature of the failure mode that produced them, where a client of a bucket + that was a victim of this bug may find the object associated with the key to + be in an inconsistent state. To check whether a versioned bucket has unlinked + entries, users can now run ``radosgw-admin bucket check unlinked``. If the + ``--fix`` flag is used, the unlinked objects will be safely removed. Finally, + a third issue made it possible for versioned bucket index stats to be + accounted inaccurately. The tooling for recalculating versioned bucket stats + also had a bug, and was not previously capable of fixing these inaccuracies. + This release resolves those issues and users can now expect that the existing + ``radosgw-admin bucket check`` command will produce correct results. We + recommend that users with versioned buckets, especially those that existed + on prior releases, use these new tools to check whether their buckets are + affected and to clean them up accordingly. +* mgr/snap-schedule: For clusters with multiple CephFS file systems, all the + snap-schedule commands now expect the '--fs' argument. + >=18.0.0 * The RGW policy parser now rejects unknown principals by default. If you are @@ -171,6 +221,11 @@ https://docs.ceph.com/en/reef/rados/configuration/mclock-config-ref/ * CEPHFS: After recovering a Ceph File System post following the disaster recovery procedure, the recovered files under `lost+found` directory can now be deleted. + https://docs.ceph.com/en/latest/rados/configuration/mclock-config-ref/ +* mgr/snap_schedule: The snap-schedule mgr module now retains one less snapshot + than the number mentioned against the config tunable `mds_max_snaps_per_dir` + so that a new snapshot can be created and retained during the next schedule + run. >=17.2.1 diff --git a/ceph/README.md b/ceph/README.md index 1f00c5dd1..7cb2c240f 100644 --- a/ceph/README.md +++ b/ceph/README.md @@ -1,23 +1,25 @@ # Ceph - a scalable distributed storage system -Please see https://ceph.com/ for current info. +See https://ceph.com/ for current information about Ceph. ## Contributing Code -Most of Ceph is dual licensed under the LGPL version 2.1 or 3.0. Some -miscellaneous code is under a BSD-style license or is public domain. -The documentation is licensed under Creative Commons -Attribution Share Alike 3.0 (CC-BY-SA-3.0). There are a handful of headers -included here that are licensed under the GPL. Please see the file -COPYING for a full inventory of licenses by file. +Most of Ceph is dual-licensed under the LGPL version 2.1 or 3.0. Some +miscellaneous code is either public domain or licensed under a BSD-style +license. -Code contributions must include a valid "Signed-off-by" acknowledging -the license for the modified or contributed file. Please see the file -SubmittingPatches.rst for details on what that means and on how to -generate and submit patches. +The Ceph documentation is licensed under Creative Commons Attribution Share +Alike 3.0 (CC-BY-SA-3.0). -We do not require assignment of copyright to contribute code; code is +Some headers included in the `ceph/ceph` repository are licensed under the GPL. +See the file `COPYING` for a full inventory of licenses by file. + +All code contributions must include a valid "Signed-off-by" line. See the file +`SubmittingPatches.rst` for details on this and instructions on how to generate +and submit patches. 
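The release notes above name several new radosgw-admin maintenance commands without showing full invocations. As an illustrative sketch only (the bucket name is a placeholder, ``--bucket`` is the usual radosgw-admin selector rather than something introduced by this release, and the exact flags should be confirmed against ``radosgw-admin --help`` on 18.2.1), a per-bucket cleanup pass might look like:

    # Report first, then repair with --fix.
    radosgw-admin bucket check olh --bucket=mybucket
    radosgw-admin bucket check olh --bucket=mybucket --fix
    radosgw-admin bucket check unlinked --bucket=mybucket
    radosgw-admin bucket check unlinked --bucket=mybucket --fix
    # Recalculate versioned bucket index stats.
    radosgw-admin bucket check --bucket=mybucket
    # Multi-site deployments using SSE: mark encrypted multipart uploads for re-replication.
    radosgw-admin bucket resync encrypted multipart --bucket=mybucket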
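Likewise for the CephFS items in the same notes, a hedged sketch (the file system name, path, schedule, and threshold value are placeholders; the ``--fs`` argument and the ``mds_session_metadata_threshold`` option are the names given in the notes, but verify the exact snap-schedule syntax against the reef documentation):

    # snap-schedule commands now expect --fs on clusters with multiple file systems
    ceph fs snap-schedule add / 1h --fs myfs
    ceph fs snap-schedule status / --fs myfs
    # cap how large a client's (encoded) session metadata may grow (value illustrative)
    ceph config set mds mds_session_metadata_threshold 16777216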
+ +Assignment of copyright is not required to contribute code. Code is contributed under the terms of the applicable license. @@ -33,10 +35,11 @@ command on a system that has git installed: git clone https://github.com/ceph/ceph.git -When the ceph/ceph repository has been cloned to your system, run the following -command to check out the git submodules associated with the ceph/ceph -repository: +When the `ceph/ceph` repository has been cloned to your system, run the +following commands to move into the cloned `ceph/ceph` repository and to check +out the git submodules associated with it: + cd ceph git submodule update --init --recursive @@ -63,34 +66,42 @@ Install the ``python3-routes`` package: These instructions are meant for developers who are compiling the code for development and testing. To build binaries that are suitable for installation -we recommend that you build .deb or .rpm packages, or refer to ``ceph.spec.in`` -or ``debian/rules`` to see which configuration options are specified for -production builds. +we recommend that you build `.deb` or `.rpm` packages, or refer to +``ceph.spec.in`` or ``debian/rules`` to see which configuration options are +specified for production builds. -Build instructions: +To build Ceph, make sure that you are in the top-level `ceph` directory that +contains `do_cmake.sh` and `CONTRIBUTING.rst` and run the following commands: ./do_cmake.sh cd build ninja -``do_cmake.sh`` defaults to creating a debug build of Ceph that can be up to 5x -slower with some workloads. Pass ``-DCMAKE_BUILD_TYPE=RelWithDebInfo`` to -``do_cmake.sh`` to create a non-debug release. - -The number of jobs used by `ninja` is derived from the number of CPU cores of -the building host if unspecified. Use the `-j` option to limit the job number -if the build jobs are running out of memory. On average, each job takes around -2.5GiB memory. - -This assumes that you make your build directory a subdirectory of the ceph.git -checkout. If you put it elsewhere, just point `CEPH_GIT_DIR` to the correct -path to the checkout. Additional CMake args can be specified by setting ARGS -before invoking ``do_cmake.sh``. See [cmake options](#cmake-options) -for more details. For example: +``do_cmake.sh`` by default creates a "debug build" of Ceph, which can be up to +five times slower than a non-debug build. Pass +``-DCMAKE_BUILD_TYPE=RelWithDebInfo`` to ``do_cmake.sh`` to create a non-debug +build. + +[Ninja](https://ninja-build.org/) is the buildsystem used by the Ceph project +to build test builds. The number of jobs used by `ninja` is derived from the +number of CPU cores of the building host if unspecified. Use the `-j` option to +limit the job number if the build jobs are running out of memory. If you +attempt to run `ninja` and receive a message that reads `g++: fatal error: +Killed signal terminated program cc1plus`, then you have run out of memory. +Using the `-j` option with an argument appropriate to the hardware on which the +`ninja` command is run is expected to result in a successful build. For example, +to limit the job number to 3, run the command `ninja -j 3`. On average, each +`ninja` job run in parallel needs approximately 2.5 GiB of RAM. + +This documentation assumes that your build directory is a subdirectory of the +`ceph.git` checkout. If the build directory is located elsewhere, point +`CEPH_GIT_DIR` to the correct path of the checkout. Additional CMake args can +be specified by setting ARGS before invoking ``do_cmake.sh``. 
See [cmake +options](#cmake-options) for more details. For example: ARGS="-DCMAKE_C_COMPILER=gcc-7" ./do_cmake.sh -To build only certain targets use: +To build only certain targets, run a command of the following form: ninja [target name] @@ -153,24 +164,25 @@ are committed to git.) ## Running a test cluster -To run a functional test cluster, +From the `ceph/` directory, run the following commands to launch a test Ceph +cluster: cd build ninja vstart # builds just enough to run vstart ../src/vstart.sh --debug --new -x --localhost --bluestore ./bin/ceph -s -Almost all of the usual commands are available in the bin/ directory. -For example, +Most Ceph commands are available in the `bin/` directory. For example: - ./bin/rados -p rbd bench 30 write ./bin/rbd create foo --size 1000 + ./bin/rados -p foo bench 30 write -To shut down the test cluster, +To shut down the test cluster, run the following command from the `build/` +directory: ../src/stop.sh -To start or stop individual daemons, the sysvinit script can be used: +Use the sysvinit script to start or stop individual daemons: ./bin/init-ceph restart osd.0 ./bin/init-ceph stop diff --git a/ceph/ceph.spec b/ceph/ceph.spec index 0d559f923..9c5703b6e 100644 --- a/ceph/ceph.spec +++ b/ceph/ceph.spec @@ -170,7 +170,7 @@ # main package definition ################################################################################# Name: ceph -Version: 18.2.0 +Version: 18.2.1 Release: 0%{?dist} %if 0%{?fedora} || 0%{?rhel} Epoch: 2 @@ -186,7 +186,7 @@ License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD- Group: System/Filesystems %endif URL: http://ceph.com/ -Source0: %{?_remote_tarball_prefix}ceph-18.2.0.tar.bz2 +Source0: %{?_remote_tarball_prefix}ceph-18.2.1.tar.bz2 %if 0%{?suse_version} # _insert_obs_source_lines_here ExclusiveArch: x86_64 aarch64 ppc64le s390x @@ -1292,7 +1292,7 @@ This package provides a Ceph MIB for SNMP traps. 
# common ################################################################################# %prep -%autosetup -p1 -n ceph-18.2.0 +%autosetup -p1 -n ceph-18.2.1 %build # Disable lto on systems that do not support symver attribute diff --git a/ceph/changelog.upstream b/ceph/changelog.upstream index f76511bb4..44c7d4fdf 100644 --- a/ceph/changelog.upstream +++ b/ceph/changelog.upstream @@ -1,7 +1,13 @@ -ceph (18.2.0-1jammy) jammy; urgency=medium +ceph (18.2.1-1jammy) jammy; urgency=medium - -- Jenkins Build Slave User Thu, 03 Aug 2023 18:57:50 +0000 + -- Jenkins Build Slave User Mon, 11 Dec 2023 22:07:48 +0000 + +ceph (18.2.1-1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Mon, 11 Dec 2023 21:55:36 +0000 ceph (18.2.0-1) stable; urgency=medium diff --git a/ceph/debian/ceph-base.docs b/ceph/debian/ceph-base.docs deleted file mode 100644 index e845566c0..000000000 --- a/ceph/debian/ceph-base.docs +++ /dev/null @@ -1 +0,0 @@ -README diff --git a/ceph/debian/ceph-mon.postinst b/ceph/debian/ceph-mon.postinst index b33f34b6b..688d8141d 100644 --- a/ceph/debian/ceph-mon.postinst +++ b/ceph/debian/ceph-mon.postinst @@ -1,3 +1,4 @@ +#!/bin/sh # vim: set noet ts=8: # postinst script for ceph-mon # diff --git a/ceph/debian/ceph-osd.postinst b/ceph/debian/ceph-osd.postinst index 5e44548fe..04e33b860 100644 --- a/ceph/debian/ceph-osd.postinst +++ b/ceph/debian/ceph-osd.postinst @@ -1,3 +1,4 @@ +#!/bin/sh # vim: set noet ts=8: # postinst script for ceph-osd # diff --git a/ceph/debian/compat b/ceph/debian/compat index ec635144f..48082f72f 100644 --- a/ceph/debian/compat +++ b/ceph/debian/compat @@ -1 +1 @@ -9 +12 diff --git a/ceph/debian/control b/ceph/debian/control index 89b3b3741..837a55a37 100644 --- a/ceph/debian/control +++ b/ceph/debian/control @@ -4,7 +4,7 @@ Priority: optional Homepage: http://ceph.com/ Vcs-Git: git://github.com/ceph/ceph.git Vcs-Browser: https://github.com/ceph/ceph -Maintainer: Ceph Maintainers +Maintainer: Ceph Maintainers Uploaders: Ken Dreyer , Alfredo Deza , Build-Depends: automake, @@ -20,8 +20,7 @@ Build-Depends: automake, git, golang, gperf, - g++ (>= 7), - hostname , + g++ (>= 11), javahelper, jq , jsonnet , @@ -135,9 +134,6 @@ Package: ceph-base Architecture: linux-any Depends: binutils, ceph-common (= ${binary:Version}), - debianutils, - findutils, - grep, logrotate, parted, psmisc, @@ -187,8 +183,9 @@ Description: debugging symbols for ceph-base Package: cephadm Architecture: linux-any -Recommends: podman (>= 2.0.2) | docker.io +Recommends: podman (>= 2.0.2) | docker.io | docker-ce Depends: lvm2, + python3, ${python3:Depends}, Description: cephadm utility to bootstrap ceph daemons with systemd and containers Ceph is a massively scalable, open-source, distributed @@ -431,7 +428,6 @@ Depends: ceph-osd (= ${binary:Version}), e2fsprogs, lvm2, parted, - util-linux, xfsprogs, ${misc:Depends}, ${python3:Depends} @@ -759,7 +755,7 @@ Architecture: any Section: debug Priority: extra Depends: libsqlite3-mod-ceph (= ${binary:Version}), - libsqlite3-0-dbgsym + libsqlite3-0-dbgsym, ${misc:Depends}, Description: debugging symbols for libsqlite3-mod-ceph A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS @@ -1207,14 +1203,14 @@ Description: Java Native Interface library for CephFS Java bindings Package: rados-objclass-dev Architecture: linux-any Section: libdevel -Depends: librados-dev (= ${binary:Version}) ${misc:Depends}, +Depends: librados-dev (= ${binary:Version}), ${misc:Depends}, Description: RADOS object class development 
kit. . This package contains development files needed for building RADOS object class plugins. Package: cephfs-shell Architecture: all -Depends: ${misc:Depends} +Depends: ${misc:Depends}, ${python3:Depends} Description: interactive shell for the Ceph distributed file system Ceph is a massively scalable, open-source, distributed @@ -1227,7 +1223,7 @@ Description: interactive shell for the Ceph distributed file system Package: cephfs-top Architecture: all -Depends: ${misc:Depends} +Depends: ${misc:Depends}, ${python3:Depends} Description: This package provides a top(1) like utility to display various filesystem metrics in realtime. diff --git a/ceph/debian/copyright b/ceph/debian/copyright index 8375e4c4b..8dc4b9e4f 100644 --- a/ceph/debian/copyright +++ b/ceph/debian/copyright @@ -1,6 +1,6 @@ -Format-Specification: http://anonscm.debian.org/viewvc/dep/web/deps/dep5/copyright-format.xml?revision=279&view=markup -Name: ceph -Maintainer: Sage Weil +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: ceph +Upstream-Contact: Ceph Developers Source: http://ceph.com/ Files: * @@ -180,3 +180,553 @@ Files: src/include/timegm.h Copyright (C) Copyright Howard Hinnant Copyright (C) Copyright 2010-2011 Vicente J. Botet Escriba License: Boost Software License, Version 1.0 + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + The complete text of the Apache License, Version 2.0 + can be found in "/usr/share/common-licenses/Apache-2.0". + + +License: GPL-2 + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + . + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + . + On Debian systems, the complete text of the GNU General Public License + version 2 can be found in `/usr/share/common-licenses/GPL-2' file. + +License: GPL-2+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + . + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License + along with this program. If not, see . + . 
+ On Debian systems, the complete text of the GNU General Public License + version 2 can be found in `/usr/share/common-licenses/GPL-2'. + +License: GPL-3+ + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + . + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License + along with this program. If not, see . + . + On Debian systems, the complete text of the GNU General Public + License version 3 can be found in `/usr/share/common-licenses/GPL-3'. + +License: LGPL-2.1 + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License version 2.1 as published by the Free Software Foundation. + . + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + . + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + . + On Debian systems, the complete text of the GNU Lesser General + Public License can be found in `/usr/share/common-licenses/LGPL-2.1'. + +License: LGPL-2.1+ + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + . + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + . + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + . + On Debian systems, the complete text of the GNU Lesser General + Public License can be found in `/usr/share/common-licenses/LGPL-2.1'. + +License: LGPL-2+ + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License version 2 (or later) as published by the Free Software Foundation. + . + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + . + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + . + On Debian systems, the complete text of the GNU Lesser General + Public License 2 can be found in `/usr/share/common-licenses/LGPL-2'. 
+ +License: MIT + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + . + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + . + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +License: CC-BY-SA-3.0 + Creative Commons Attribution-ShareAlike 3.0 Unported + ․ + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION + ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE + INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + ITS USE. + ․ + License + ․ + THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE + COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY + COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS + AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + ․ + BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE + TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY + BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS + CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND + CONDITIONS. + ․ + 1. Definitions + ․ + a. "Adaptation" means a work based upon the Work, or upon the Work and + other pre-existing works, such as a translation, adaptation, derivative + work, arrangement of music or other alterations of a literary or + artistic work, or phonogram or performance and includes cinematographic + adaptations or any other form in which the Work may be recast, + transformed, or adapted including in any form recognizably derived from + the original, except that a work that constitutes a Collection will not + be considered an Adaptation for the purpose of this License. For the + avoidance of doubt, where the Work is a musical work, performance or + phonogram, the synchronization of the Work in timed-relation with a + moving image ("synching") will be considered an Adaptation for the + purpose of this License. + ․ + b. 
"Collection" means a collection of literary or artistic works, such + as encyclopedias and anthologies, or performances, phonograms or + broadcasts, or other works or subject matter other than works listed in + Section 1(f) below, which, by reason of the selection and arrangement of + their contents, constitute intellectual creations, in which the Work is + included in its entirety in unmodified form along with one or more other + contributions, each constituting separate and independent works in + themselves, which together are assembled into a collective whole. A work + that constitutes a Collection will not be considered an Adaptation (as + defined below) for the purposes of this License. + ․ + c. "Creative Commons Compatible License" means a license that is listed + at http://creativecommons.org/compatiblelicenses that has been approved + by Creative Commons as being essentially equivalent to this License, + including, at a minimum, because that license: (i) contains terms that + have the same purpose, meaning and effect as the License Elements of + this License; and, (ii) explicitly permits the relicensing of + adaptations of works made available under that license under this + License or a Creative Commons jurisdiction license with the same License + Elements as this License. + ․ + d. "Distribute" means to make available to the public the original and + copies of the Work or Adaptation, as appropriate, through sale or other + transfer of ownership. + ․ + e. "License Elements" means the following high-level license attributes + as selected by Licensor and indicated in the title of this License: + Attribution, ShareAlike. + ․ + f. "Licensor" means the individual, individuals, entity or entities that + offer(s) the Work under the terms of this License. + ․ + g. "Original Author" means, in the case of a literary or artistic work, + the individual, individuals, entity or entities who created the Work or + if no individual or entity can be identified, the publisher; and in + addition (i) in the case of a performance the actors, singers, + musicians, dancers, and other persons who act, sing, deliver, declaim, + play in, interpret or otherwise perform literary or artistic works or + expressions of folklore; (ii) in the case of a phonogram the producer + being the person or legal entity who first fixes the sounds of a + performance or other sounds; and, (iii) in the case of broadcasts, the + organization that transmits the broadcast. + ․ + h. 
"Work" means the literary and/or artistic work offered under the + terms of this License including without limitation any production in the + literary, scientific and artistic domain, whatever may be the mode or + form of its expression including digital form, such as a book, pamphlet + and other writing; a lecture, address, sermon or other work of the same + nature; a dramatic or dramatico-musical work; a choreographic work or + entertainment in dumb show; a musical composition with or without words; + a cinematographic work to which are assimilated works expressed by a + process analogous to cinematography; a work of drawing, painting, + architecture, sculpture, engraving or lithography; a photographic work + to which are assimilated works expressed by a process analogous to + photography; a work of applied art; an illustration, map, plan, sketch + or three-dimensional work relative to geography, topography, + architecture or science; a performance; a broadcast; a phonogram; a + compilation of data to the extent it is protected as a copyrightable + work; or a work performed by a variety or circus performer to the extent + it is not otherwise considered a literary or artistic work. + ․ + i. "You" means an individual or entity exercising rights under this + License who has not previously violated the terms of this License with + respect to the Work, or who has received express permission from the + Licensor to exercise rights under this License despite a previous + violation. + ․ + j. "Publicly Perform" means to perform public recitations of the Work + and to communicate to the public those public recitations, by any means + or process, including by wire or wireless means or public digital + performances; to make available to the public Works in such a way that + members of the public may access these Works from a place and at a place + individually chosen by them; to perform the Work to the public by any + means or process and the communication to the public of the performances + of the Work, including by public digital performance; to broadcast and + rebroadcast the Work by any means including signs, sounds or images. + ․ + k. "Reproduce" means to make copies of the Work by any means including + without limitation by sound or visual recordings and the right of + fixation and reproducing fixations of the Work, including storage of a + protected performance or phonogram in digital form or other electronic + medium. + ․ + 2. Fair Dealing Rights. Nothing in this License is intended to reduce, + limit, or restrict any uses free from copyright or rights arising from + limitations or exceptions that are provided for in connection with the + copyright protection under copyright law or other applicable laws. + ․ + 3. License Grant. Subject to the terms and conditions of this License, + Licensor hereby grants You a worldwide, royalty-free, non-exclusive, + perpetual (for the duration of the applicable copyright) license to + exercise the rights in the Work as stated below: + ․ + a. to Reproduce the Work, to incorporate the Work into one or more + Collections, and to Reproduce the Work as incorporated in the + Collections; + ․ + b. to create and Reproduce Adaptations provided that any such + Adaptation, including any translation in any medium, takes reasonable + steps to clearly label, demarcate or otherwise identify that changes + were made to the original Work. 
For example, a translation could be + marked "The original work was translated from English to Spanish," or a + modification could indicate "The original work has been modified."; + ․ + c. to Distribute and Publicly Perform the Work including as incorporated + in Collections; and, + ․ + d. to Distribute and Publicly Perform Adaptations. + ․ + e. For the avoidance of doubt: + ․ + i. Non-waivable Compulsory License Schemes. In those jurisdictions in + which the right to collect royalties through any statutory or compulsory + licensing scheme cannot be waived, the Licensor reserves the exclusive + right to collect such royalties for any exercise by You of the rights + granted under this License; + ․ + ii. Waivable Compulsory License Schemes. In those jurisdictions in which + the right to collect royalties through any statutory or compulsory + licensing scheme can be waived, the Licensor waives the exclusive right + to collect such royalties for any exercise by You of the rights granted + under this License; and, + ․ + iii. Voluntary License Schemes. The Licensor waives the right to collect + royalties, whether individually or, in the event that the Licensor is a + member of a collecting society that administers voluntary licensing + schemes, via that society, from any exercise by You of the rights + granted under this License. + ․ + The above rights may be exercised in all media and formats whether now + known or hereafter devised. The above rights include the right to make + such modifications as are technically necessary to exercise the rights + in other media and formats. Subject to Section 8(f), all rights not + expressly granted by Licensor are hereby reserved. + ․ + 4. Restrictions. The license granted in Section 3 above is expressly + made subject to and limited by the following restrictions: + ․ + a. You may Distribute or Publicly Perform the Work only under the terms + of this License. You must include a copy of, or the Uniform Resource + Identifier (URI) for, this License with every copy of the Work You + Distribute or Publicly Perform. You may not offer or impose any terms on + the Work that restrict the terms of this License or the ability of the + recipient of the Work to exercise the rights granted to that recipient + under the terms of the License. You may not sublicense the Work. You + must keep intact all notices that refer to this License and to the + disclaimer of warranties with every copy of the Work You Distribute or + Publicly Perform. When You Distribute or Publicly Perform the Work, You + may not impose any effective technological measures on the Work that + restrict the ability of a recipient of the Work from You to exercise the + rights granted to that recipient under the terms of the License. This + Section 4(a) applies to the Work as incorporated in a Collection, but + this does not require the Collection apart from the Work itself to be + made subject to the terms of this License. If You create a Collection, + upon notice from any Licensor You must, to the extent practicable, + remove from the Collection any credit as required by Section 4(c), as + requested. If You create an Adaptation, upon notice from any Licensor + You must, to the extent practicable, remove from the Adaptation any + credit as required by Section 4(c), as requested. + ․ + b. 
You may Distribute or Publicly Perform an Adaptation only under the + terms of: (i) this License; (ii) a later version of this License with + the same License Elements as this License; (iii) a Creative Commons + jurisdiction license (either this or a later license version) that + contains the same License Elements as this License (e.g., + Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible + License. If you license the Adaptation under one of the licenses + mentioned in (iv), you must comply with the terms of that license. If + you license the Adaptation under the terms of any of the licenses + mentioned in (i), (ii) or (iii) (the "Applicable License"), you must + comply with the terms of the Applicable License generally and the + following provisions: (I) You must include a copy of, or the URI for, + the Applicable License with every copy of each Adaptation You Distribute + or Publicly Perform; (II) You may not offer or impose any terms on the + Adaptation that restrict the terms of the Applicable License or the + ability of the recipient of the Adaptation to exercise the rights + granted to that recipient under the terms of the Applicable License; + (III) You must keep intact all notices that refer to the Applicable + License and to the disclaimer of warranties with every copy of the Work + as included in the Adaptation You Distribute or Publicly Perform; (IV) + when You Distribute or Publicly Perform the Adaptation, You may not + impose any effective technological measures on the Adaptation that + restrict the ability of a recipient of the Adaptation from You to + exercise the rights granted to that recipient under the terms of the + Applicable License. This Section 4(b) applies to the Adaptation as + incorporated in a Collection, but this does not require the Collection + apart from the Adaptation itself to be made subject to the terms of the + Applicable License. + ․ + c. If You Distribute, or Publicly Perform the Work or any Adaptations or + Collections, You must, unless a request has been made pursuant to + Section 4(a), keep intact all copyright notices for the Work and + provide, reasonable to the medium or means You are utilizing: (i) the + name of the Original Author (or pseudonym, if applicable) if supplied, + and/or if the Original Author and/or Licensor designate another party or + parties (e.g., a sponsor institute, publishing entity, journal) for + attribution ("Attribution Parties") in Licensor's copyright notice, + terms of service or by other reasonable means, the name of such party or + parties; (ii) the title of the Work if supplied; (iii) to the extent + reasonably practicable, the URI, if any, that Licensor specifies to be + associated with the Work, unless such URI does not refer to the + copyright notice or licensing information for the Work; and (iv) , + consistent with Ssection 3(b), in the case of an Adaptation, a credit + identifying the use of the Work in the Adaptation (e.g., "French + translation of the Work by Original Author," or "Screenplay based on + original Work by Original Author"). The credit required by this Section + 4(c) may be implemented in any reasonable manner; provided, however, + that in the case of a Adaptation or Collection, at a minimum such credit + will appear, if a credit for all contributing authors of the Adaptation + or Collection appears, then as part of these credits and in a manner at + least as prominent as the credits for the other contributing authors. 
+ For the avoidance of doubt, You may only use the credit required by this + Section for the purpose of attribution in the manner set out above and, + by exercising Your rights under this License, You may not implicitly or + explicitly assert or imply any connection with, sponsorship or + endorsement by the Original Author, Licensor and/or Attribution Parties, + as appropriate, of You or Your use of the Work, without the separate, + express prior written permission of the Original Author, Licensor and/or + Attribution Parties. + ․ + d. Except as otherwise agreed in writing by the Licensor or as may be + otherwise permitted by applicable law, if You Reproduce, Distribute or + Publicly Perform the Work either by itself or as part of any Adaptations + or Collections, You must not distort, mutilate, modify or take other + derogatory action in relation to the Work which would be prejudicial to + the Original Author's honor or reputation. Licensor agrees that in those + jurisdictions (e.g. Japan), in which any exercise of the right granted + in Section 3(b) of this License (the right to make Adaptations) would be + deemed to be a distortion, mutilation, modification or other derogatory + action prejudicial to the Original Author's honor and reputation, the + Licensor will waive or not assert, as appropriate, this Section, to the + fullest extent permitted by the applicable national law, to enable You + to reasonably exercise Your right under Section 3(b) of this License + (right to make Adaptations) but not otherwise. + ․ + 5. Representations, Warranties and Disclaimer + ․ + UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR + OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY + KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, + INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, + FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF + LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, + WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE + EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + ․ + 6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE + LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR + ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES + ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS + BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + ․ + 7. Termination + ․ + a. This License and the rights granted hereunder will terminate + automatically upon any breach by You of the terms of this License. + Individuals or entities who have received Adaptations or Collections + from You under this License, however, will not have their licenses + terminated provided such individuals or entities remain in full + compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will + survive any termination of this License. + ․ + b. Subject to the above terms and conditions, the license granted here + is perpetual (for the duration of the applicable copyright in the Work). 
+ Notwithstanding the above, Licensor reserves the right to release the + Work under different license terms or to stop distributing the Work at + any time; provided, however that any such election will not serve to + withdraw this License (or any other license that has been, or is + required to be, granted under the terms of this License), and this + License will continue in full force and effect unless terminated as + stated above. + ․ + 8. Miscellaneous + ․ + a. Each time You Distribute or Publicly Perform the Work or a + Collection, the Licensor offers to the recipient a license to the Work + on the same terms and conditions as the license granted to You under + this License. + ․ + b. Each time You Distribute or Publicly Perform an Adaptation, Licensor + offers to the recipient a license to the original Work on the same terms + and conditions as the license granted to You under this License. + ․ + c. If any provision of this License is invalid or unenforceable under + applicable law, it shall not affect the validity or enforceability of + the remainder of the terms of this License, and without further action + by the parties to this agreement, such provision shall be reformed to + the minimum extent necessary to make such provision valid and + enforceable. + ․ + d. No term or provision of this License shall be deemed waived and no + breach consented to unless such waiver or consent shall be in writing + and signed by the party to be charged with such waiver or consent. + ․ + e. This License constitutes the entire agreement between the parties + with respect to the Work licensed here. There are no understandings, + agreements or representations with respect to the Work not specified + here. Licensor shall not be bound by any additional provisions that may + appear in any communication from You. This License may not be modified + without the mutual written agreement of the Licensor and You. + ․ + f. The rights granted under, and the subject matter referenced, in this + License were drafted utilizing the terminology of the Berne Convention + for the Protection of Literary and Artistic Works (as amended on + September 28, 1979), the Rome Convention of 1961, the WIPO Copyright + Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and + the Universal Copyright Convention (as revised on July 24, 1971). These + rights and subject matter take effect in the relevant jurisdiction in + which the License terms are sought to be enforced according to the + corresponding provisions of the implementation of those treaty + provisions in the applicable national law. If the standard suite of + rights granted under applicable copyright law includes additional rights + not granted under this License, such additional rights are deemed to be + included in the License; this License is not intended to restrict the + license of any rights under applicable law. + ․ + ․ + Creative Commons Notice + ․ + Creative Commons is not a party to this License, and makes no warranty + whatsoever in connection with the Work. Creative Commons will not be + liable to You or any party on any legal theory for any damages + whatsoever, including without limitation any general, special, + incidental or consequential damages arising in connection to this + license. Notwithstanding the foregoing two (2) sentences, if Creative + Commons has expressly identified itself as the Licensor hereunder, it + shall have all rights and obligations of Licensor. 
+ ․ + Except for the limited purpose of indicating to the public that the Work + is licensed under the CCPL, Creative Commons does not authorize the use + by either party of the trademark "Creative Commons" or any related + trademark or logo of Creative Commons without the prior written consent + of Creative Commons. Any permitted use will be in compliance with + Creative Commons' then-current trademark usage guidelines, as may be + published on its website or otherwise made available upon request from + time to time. For the avoidance of doubt, this trademark restriction + does not form part of the License. + ․ + Creative Commons may be contacted at http://creativecommons.org/. + +License: BSD-3-clause + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + . + 1. Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + . + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + . + 3. Neither the name of the copyright holder nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + . + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/ceph/debian/rules b/ceph/debian/rules index c163daad2..ed7f4a255 100755 --- a/ceph/debian/rules +++ b/ceph/debian/rules @@ -5,7 +5,6 @@ export DESTDIR=$(CURDIR)/debian/tmp include /usr/share/dpkg/default.mk -extraopts += -DCMAKE_C_COMPILER=gcc-11 -DCMAKE_CXX_COMPILER=g++-11 ifneq (,$(findstring WITH_STATIC_LIBSTDCXX,$(CEPH_EXTRA_CMAKE_ARGS))) # dh_auto_build sets LDFLAGS with `dpkg-buildflags --get LDFLAGS` on ubuntu, # which makes the application aborts when the shared library throws @@ -59,19 +58,15 @@ py3_overrides_packages := $(basename $(notdir $(wildcard debian/*.requires))) py3_packages := cephfs-shell cephfs-top cephadm %: - dh $@ --buildsystem=cmake --with javahelper,python3,systemd --parallel + dh $@ --buildsystem=cmake --with javahelper,python3 --parallel override_dh_auto_configure: env | sort dh_auto_configure --buildsystem=cmake -- $(extraopts) $(CEPH_EXTRA_CMAKE_ARGS) -override_dh_auto_build: - dh_auto_build --buildsystem=cmake - cp src/init-radosgw debian/radosgw.init - override_dh_auto_clean: dh_auto_clean --buildsystem=cmake - rm -f debian/radosgw.init debian/ceph.logrotate + rm -f debian/radosgw.init debian/ceph.logrotate debian/ceph-base.docs override_dh_auto_install: dh_auto_install --buildsystem=cmake --destdir=$(DESTDIR) @@ -87,13 +82,12 @@ override_dh_auto_install: override_dh_installchangelogs: dh_installchangelogs --exclude doc/changelog -override_dh_installdocs: - override_dh_installlogrotate: cp src/logrotate.conf debian/ceph-common.logrotate dh_installlogrotate -pceph-common override_dh_installinit: + cp src/init-radosgw debian/radosgw.init # install the systemd stuff manually since we have funny service names install -d -m0755 debian/ceph-common/etc/default install -m0644 etc/default/ceph debian/ceph-common/etc/default/ @@ -103,15 +97,9 @@ override_dh_installinit: dh_installinit -p ceph-base --name ceph --no-start dh_installinit -p radosgw --no-start - # NOTE: execute systemd helpers so they pickup dh_install'ed units and targets - dh_systemd_enable - dh_systemd_start --no-restart-on-upgrade - -override_dh_systemd_enable: - # systemd enable done as part of dh_installinit - -override_dh_systemd_start: - # systemd start done as part of dh_installinit +override_dh_installsystemd: + # Only enable and start systemd targets + dh_installsystemd --no-stop-on-upgrade --no-restart-after-upgrade -Xceph-mon.service -Xceph-osd.service -X ceph-mds.service override_dh_strip: dh_strip -pceph-mds --dbg-package=ceph-mds-dbg @@ -152,8 +140,12 @@ override_dh_python3: @for pkg in $(py3_packages); do \ dh_python3 -p $$pkg; \ done + dh_python3 -p ceph-base --shebang=/usr/bin/python3 + dh_python3 -p ceph-common --shebang=/usr/bin/python3 + dh_python3 -p ceph-fuse --shebang=/usr/bin/python3 + dh_python3 -p ceph-volume --shebang=/usr/bin/python3 # do not run tests override_dh_auto_test: -.PHONY: override_dh_autoreconf override_dh_auto_configure override_dh_auto_build override_dh_auto_clean override_dh_auto_install override_dh_installdocs override_dh_installlogrotate override_dh_installinit override_dh_systemd_start override_dh_strip override_dh_auto_test +.PHONY: override_dh_autoreconf override_dh_auto_configure override_dh_auto_clean override_dh_auto_install override_dh_installlogrotate override_dh_installinit override_dh_strip override_dh_auto_test diff --git a/ceph/doc/architecture.rst b/ceph/doc/architecture.rst index 405263557..852225ce6 100644 --- a/ceph/doc/architecture.rst +++ b/ceph/doc/architecture.rst @@ -30,58 +30,54 @@ A Ceph Storage Cluster consists of 
multiple types of daemons: - :term:`Ceph Manager` - :term:`Ceph Metadata Server` -.. ditaa:: - - +---------------+ +---------------+ +---------------+ +---------------+ - | OSDs | | Monitors | | Managers | | MDS | - +---------------+ +---------------+ +---------------+ +---------------+ +.. _arch_monitor: -A Ceph Monitor maintains a master copy of the cluster map. A cluster of Ceph -monitors ensures high availability should a monitor daemon fail. Storage cluster -clients retrieve a copy of the cluster map from the Ceph Monitor. +Ceph Monitors maintain the master copy of the cluster map, which they provide +to Ceph clients. Provisioning multiple monitors within the Ceph cluster ensures +availability in the event that one of the monitor daemons or its host fails. +The Ceph monitor provides copies of the cluster map to storage cluster clients. A Ceph OSD Daemon checks its own state and the state of other OSDs and reports back to monitors. -A Ceph Manager acts as an endpoint for monitoring, orchestration, and plug-in +A Ceph Manager serves as an endpoint for monitoring, orchestration, and plug-in modules. A Ceph Metadata Server (MDS) manages file metadata when CephFS is used to provide file services. -Storage cluster clients and each :term:`Ceph OSD Daemon` use the CRUSH algorithm -to efficiently compute information about data location, instead of having to -depend on a central lookup table. Ceph's high-level features include a -native interface to the Ceph Storage Cluster via ``librados``, and a number of -service interfaces built on top of ``librados``. - - +Storage cluster clients and :term:`Ceph OSD Daemon`\s use the CRUSH algorithm +to compute information about data location. This means that clients and OSDs +are not bottlenecked by a central lookup table. Ceph's high-level features +include a native interface to the Ceph Storage Cluster via ``librados``, and a +number of service interfaces built on top of ``librados``. Storing Data ------------ The Ceph Storage Cluster receives data from :term:`Ceph Client`\s--whether it comes through a :term:`Ceph Block Device`, :term:`Ceph Object Storage`, the -:term:`Ceph File System` or a custom implementation you create using -``librados``-- which is stored as RADOS objects. Each object is stored on an -:term:`Object Storage Device`. Ceph OSD Daemons handle read, write, and -replication operations on storage drives. With the default BlueStore back end, -objects are stored in a monolithic database-like fashion. +:term:`Ceph File System`, or a custom implementation that you create by using +``librados``. The data received by the Ceph Storage Cluster is stored as RADOS +objects. Each object is stored on an :term:`Object Storage Device` (this is +also called an "OSD"). Ceph OSDs control read, write, and replication +operations on storage drives. The default BlueStore back end stores objects +in a monolithic, database-like fashion. .. ditaa:: - /-----\ +-----+ +-----+ - | obj |------>| {d} |------>| {s} | - \-----/ +-----+ +-----+ + /------\ +-----+ +-----+ + | obj |------>| {d} |------>| {s} | + \------/ +-----+ +-----+ Object OSD Drive -Ceph OSD Daemons store data as objects in a flat namespace (e.g., no -hierarchy of directories). An object has an identifier, binary data, and -metadata consisting of a set of name/value pairs. The semantics are completely -up to :term:`Ceph Client`\s. For example, CephFS uses metadata to store file -attributes such as the file owner, created date, last modified date, and so -forth. 
+Ceph OSD Daemons store data as objects in a flat namespace. This means that +objects are not stored in a hierarchy of directories. An object has an +identifier, binary data, and metadata consisting of name/value pairs. +:term:`Ceph Client`\s determine the semantics of the object data. For example, +CephFS uses metadata to store file attributes such as the file owner, the +created date, and the last modified date. .. ditaa:: @@ -100,20 +96,23 @@ forth. .. index:: architecture; high availability, scalability +.. _arch_scalability_and_high_availability: + Scalability and High Availability --------------------------------- -In traditional architectures, clients talk to a centralized component (e.g., a -gateway, broker, API, facade, etc.), which acts as a single point of entry to a -complex subsystem. This imposes a limit to both performance and scalability, -while introducing a single point of failure (i.e., if the centralized component -goes down, the whole system goes down, too). +In traditional architectures, clients talk to a centralized component. This +centralized component might be a gateway, a broker, an API, or a facade. A +centralized component of this kind acts as a single point of entry to a complex +subsystem. Architectures that rely upon such a centralized component have a +single point of failure and incur limits to performance and scalability. If +the centralized component goes down, the whole system becomes unavailable. -Ceph eliminates the centralized gateway to enable clients to interact with -Ceph OSD Daemons directly. Ceph OSD Daemons create object replicas on other -Ceph Nodes to ensure data safety and high availability. Ceph also uses a cluster -of monitors to ensure high availability. To eliminate centralization, Ceph -uses an algorithm called CRUSH. +Ceph eliminates this centralized component. This enables clients to interact +with Ceph OSDs directly. Ceph OSDs create object replicas on other Ceph Nodes +to ensure data safety and high availability. Ceph also uses a cluster of +monitors to ensure high availability. To eliminate centralization, Ceph uses an +algorithm called :abbr:`CRUSH (Controlled Replication Under Scalable Hashing)`. .. index:: CRUSH; architecture @@ -122,15 +121,15 @@ CRUSH Introduction ~~~~~~~~~~~~~~~~~~ Ceph Clients and Ceph OSD Daemons both use the :abbr:`CRUSH (Controlled -Replication Under Scalable Hashing)` algorithm to efficiently compute -information about object location, instead of having to depend on a -central lookup table. CRUSH provides a better data management mechanism compared -to older approaches, and enables massive scale by cleanly distributing the work -to all the clients and OSD daemons in the cluster. CRUSH uses intelligent data -replication to ensure resiliency, which is better suited to hyper-scale storage. -The following sections provide additional details on how CRUSH works. For a -detailed discussion of CRUSH, see `CRUSH - Controlled, Scalable, Decentralized -Placement of Replicated Data`_. +Replication Under Scalable Hashing)` algorithm to compute information about +object location instead of relying upon a central lookup table. CRUSH provides +a better data management mechanism than do older approaches, and CRUSH enables +massive scale by distributing the work to all the OSD daemons in the cluster +and all the clients that communicate with them. CRUSH uses intelligent data +replication to ensure resiliency, which is better suited to hyper-scale +storage. 
The following sections provide additional details on how CRUSH works. +For a detailed discussion of CRUSH, see `CRUSH - Controlled, Scalable, +Decentralized Placement of Replicated Data`_. .. index:: architecture; cluster map @@ -139,61 +138,71 @@ Placement of Replicated Data`_. Cluster Map ~~~~~~~~~~~ -Ceph depends upon Ceph Clients and Ceph OSD Daemons having knowledge of the -cluster topology, which is inclusive of 5 maps collectively referred to as the -"Cluster Map": +In order for a Ceph cluster to function properly, Ceph Clients and Ceph OSDs +must have current information about the cluster's topology. Current information +is stored in the "Cluster Map", which is in fact a collection of five maps. The +five maps that constitute the cluster map are: -#. **The Monitor Map:** Contains the cluster ``fsid``, the position, name - address and port of each monitor. It also indicates the current epoch, - when the map was created, and the last time it changed. To view a monitor - map, execute ``ceph mon dump``. +#. **The Monitor Map:** Contains the cluster ``fsid``, the position, the name, + the address, and the TCP port of each monitor. The monitor map specifies the + current epoch, the time of the monitor map's creation, and the time of the + monitor map's last modification. To view a monitor map, run ``ceph mon + dump``. -#. **The OSD Map:** Contains the cluster ``fsid``, when the map was created and - last modified, a list of pools, replica sizes, PG numbers, a list of OSDs - and their status (e.g., ``up``, ``in``). To view an OSD map, execute - ``ceph osd dump``. +#. **The OSD Map:** Contains the cluster ``fsid``, the time of the OSD map's + creation, the time of the OSD map's last modification, a list of pools, a + list of replica sizes, a list of PG numbers, and a list of OSDs and their + statuses (for example, ``up``, ``in``). To view an OSD map, run ``ceph + osd dump``. -#. **The PG Map:** Contains the PG version, its time stamp, the last OSD - map epoch, the full ratios, and details on each placement group such as - the PG ID, the `Up Set`, the `Acting Set`, the state of the PG (e.g., - ``active + clean``), and data usage statistics for each pool. +#. **The PG Map:** Contains the PG version, its time stamp, the last OSD map + epoch, the full ratios, and the details of each placement group. This + includes the PG ID, the `Up Set`, the `Acting Set`, the state of the PG (for + example, ``active + clean``), and data usage statistics for each pool. #. **The CRUSH Map:** Contains a list of storage devices, the failure domain - hierarchy (e.g., device, host, rack, row, room, etc.), and rules for - traversing the hierarchy when storing data. To view a CRUSH map, execute - ``ceph osd getcrushmap -o {filename}``; then, decompile it by executing - ``crushtool -d {comp-crushmap-filename} -o {decomp-crushmap-filename}``. - You can view the decompiled map in a text editor or with ``cat``. + hierarchy (for example, ``device``, ``host``, ``rack``, ``row``, ``room``), + and rules for traversing the hierarchy when storing data. To view a CRUSH + map, run ``ceph osd getcrushmap -o {filename}`` and then decompile it by + running ``crushtool -d {comp-crushmap-filename} -o + {decomp-crushmap-filename}``. Use a text editor or ``cat`` to view the + decompiled map. #. **The MDS Map:** Contains the current MDS map epoch, when the map was created, and the last time it changed. It also contains the pool for storing metadata, a list of metadata servers, and which metadata servers are ``up`` and ``in``. 
To view an MDS map, execute ``ceph fs dump``. -Each map maintains an iterative history of its operating state changes. Ceph -Monitors maintain a master copy of the cluster map including the cluster -members, state, changes, and the overall health of the Ceph Storage Cluster. +Each map maintains a history of changes to its operating state. Ceph Monitors +maintain a master copy of the cluster map. This master copy includes the +cluster members, the state of the cluster, changes to the cluster, and +information recording the overall health of the Ceph Storage Cluster. .. index:: high availability; monitor architecture High Availability Monitors ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Before Ceph Clients can read or write data, they must contact a Ceph Monitor -to obtain the most recent copy of the cluster map. A Ceph Storage Cluster -can operate with a single monitor; however, this introduces a single -point of failure (i.e., if the monitor goes down, Ceph Clients cannot -read or write data). +A Ceph Client must contact a Ceph Monitor and obtain a current copy of the +cluster map in order to read data from or to write data to the Ceph cluster. + +It is possible for a Ceph cluster to function properly with only a single +monitor, but a Ceph cluster that has only a single monitor has a single point +of failure: if the monitor goes down, Ceph clients will be unable to read data +from or write data to the cluster. -For added reliability and fault tolerance, Ceph supports a cluster of monitors. -In a cluster of monitors, latency and other faults can cause one or more -monitors to fall behind the current state of the cluster. For this reason, Ceph -must have agreement among various monitor instances regarding the state of the -cluster. Ceph always uses a majority of monitors (e.g., 1, 2:3, 3:5, 4:6, etc.) -and the `Paxos`_ algorithm to establish a consensus among the monitors about the -current state of the cluster. +Ceph leverages a cluster of monitors in order to increase reliability and fault +tolerance. When a cluster of monitors is used, however, one or more of the +monitors in the cluster can fall behind due to latency or other faults. Ceph +mitigates these negative effects by requiring multiple monitor instances to +agree about the state of the cluster. To establish consensus among the monitors +regarding the state of the cluster, Ceph uses the `Paxos`_ algorithm and a +majority of monitors (for example, one in a cluster that contains only one +monitor, two in a cluster that contains three monitors, three in a cluster that +contains five monitors, four in a cluster that contains six monitors, and so +on). -For details on configuring monitors, see the `Monitor Config Reference`_. +See the `Monitor Config Reference`_ for more detail on configuring monitors. .. index:: architecture; high availability authentication @@ -202,48 +211,57 @@ For details on configuring monitors, see the `Monitor Config Reference`_. High Availability Authentication ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To identify users and protect against man-in-the-middle attacks, Ceph provides -its ``cephx`` authentication system to authenticate users and daemons. +The ``cephx`` authentication system is used by Ceph to authenticate users and +daemons and to protect against man-in-the-middle attacks. .. note:: The ``cephx`` protocol does not address data encryption in transport - (e.g., SSL/TLS) or encryption at rest. 
- -Cephx uses shared secret keys for authentication, meaning both the client and -the monitor cluster have a copy of the client's secret key. The authentication -protocol is such that both parties are able to prove to each other they have a -copy of the key without actually revealing it. This provides mutual -authentication, which means the cluster is sure the user possesses the secret -key, and the user is sure that the cluster has a copy of the secret key. - -A key scalability feature of Ceph is to avoid a centralized interface to the -Ceph object store, which means that Ceph clients must be able to interact with -OSDs directly. To protect data, Ceph provides its ``cephx`` authentication -system, which authenticates users operating Ceph clients. The ``cephx`` protocol -operates in a manner with behavior similar to `Kerberos`_. - -A user/actor invokes a Ceph client to contact a monitor. Unlike Kerberos, each -monitor can authenticate users and distribute keys, so there is no single point -of failure or bottleneck when using ``cephx``. The monitor returns an -authentication data structure similar to a Kerberos ticket that contains a -session key for use in obtaining Ceph services. This session key is itself -encrypted with the user's permanent secret key, so that only the user can -request services from the Ceph Monitor(s). The client then uses the session key -to request its desired services from the monitor, and the monitor provides the -client with a ticket that will authenticate the client to the OSDs that actually -handle data. Ceph Monitors and OSDs share a secret, so the client can use the -ticket provided by the monitor with any OSD or metadata server in the cluster. -Like Kerberos, ``cephx`` tickets expire, so an attacker cannot use an expired -ticket or session key obtained surreptitiously. This form of authentication will -prevent attackers with access to the communications medium from either creating -bogus messages under another user's identity or altering another user's -legitimate messages, as long as the user's secret key is not divulged before it -expires. - -To use ``cephx``, an administrator must set up users first. In the following -diagram, the ``client.admin`` user invokes ``ceph auth get-or-create-key`` from + (for example, SSL/TLS) or encryption at rest. + +``cephx`` uses shared secret keys for authentication. This means that both the +client and the monitor cluster keep a copy of the client's secret key. + +The ``cephx`` protocol makes it possible for each party to prove to the other +that it has a copy of the key without revealing it. This provides mutual +authentication and allows the cluster to confirm (1) that the user has the +secret key and (2) that the user can be confident that the cluster has a copy +of the secret key. + +As stated in :ref:`Scalability and High Availability +`, Ceph does not have any centralized +interface between clients and the Ceph object store. By avoiding such a +centralized interface, Ceph avoids the bottlenecks that attend such centralized +interfaces. However, this means that clients must interact directly with OSDs. +Direct interactions between Ceph clients and OSDs require authenticated +connections. The ``cephx`` authentication system establishes and sustains these +authenticated connections. + +The ``cephx`` protocol operates in a manner similar to `Kerberos`_. + +A user invokes a Ceph client to contact a monitor. 
Unlike Kerberos, each +monitor can authenticate users and distribute keys, which means that there is +no single point of failure and no bottleneck when using ``cephx``. The monitor +returns an authentication data structure that is similar to a Kerberos ticket. +This authentication data structure contains a session key for use in obtaining +Ceph services. The session key is itself encrypted with the user's permanent +secret key, which means that only the user can request services from the Ceph +Monitors. The client then uses the session key to request services from the +monitors, and the monitors provide the client with a ticket that authenticates +the client against the OSDs that actually handle data. Ceph Monitors and OSDs +share a secret, which means that the clients can use the ticket provided by the +monitors to authenticate against any OSD or metadata server in the cluster. + +Like Kerberos tickets, ``cephx`` tickets expire. An attacker cannot use an +expired ticket or session key that has been obtained surreptitiously. This form +of authentication prevents attackers who have access to the communications +medium from creating bogus messages under another user's identity and prevents +attackers from altering another user's legitimate messages, as long as the +user's secret key is not divulged before it expires. + +An administrator must set up users before using ``cephx``. In the following +diagram, the ``client.admin`` user invokes ``ceph auth get-or-create-key`` from the command line to generate a username and secret key. Ceph's ``auth`` -subsystem generates the username and key, stores a copy with the monitor(s) and -transmits the user's secret back to the ``client.admin`` user. This means that +subsystem generates the username and key, stores a copy on the monitor(s), and +transmits the user's secret back to the ``client.admin`` user. This means that the client and the monitor share a secret key. .. note:: The ``client.admin`` user must provide the user ID and @@ -262,17 +280,16 @@ the client and the monitor share a secret key. | transmit key | | | - -To authenticate with the monitor, the client passes in the user name to the -monitor, and the monitor generates a session key and encrypts it with the secret -key associated to the user name. Then, the monitor transmits the encrypted -ticket back to the client. The client then decrypts the payload with the shared -secret key to retrieve the session key. The session key identifies the user for -the current session. The client then requests a ticket on behalf of the user -signed by the session key. The monitor generates a ticket, encrypts it with the -user's secret key and transmits it back to the client. The client decrypts the -ticket and uses it to sign requests to OSDs and metadata servers throughout the -cluster. +Here is how a client authenticates with a monitor. The client passes the user +name to the monitor. The monitor generates a session key that is encrypted with +the secret key associated with the ``username``. The monitor transmits the +encrypted ticket to the client. The client uses the shared secret key to +decrypt the payload. The session key identifies the user, and this act of +identification will last for the duration of the session. The client requests +a ticket for the user, and the ticket is signed with the session key. The +monitor generates a ticket and uses the user's secret key to encrypt it. The +encrypted ticket is transmitted to the client. 
The client decrypts the ticket +and uses it to sign requests to OSDs and to metadata servers in the cluster. .. ditaa:: @@ -302,10 +319,11 @@ cluster. |<----+ | -The ``cephx`` protocol authenticates ongoing communications between the client -machine and the Ceph servers. Each message sent between a client and server, -subsequent to the initial authentication, is signed using a ticket that the -monitors, OSDs and metadata servers can verify with their shared secret. +The ``cephx`` protocol authenticates ongoing communications between the clients +and Ceph daemons. After initial authentication, each message sent between a +client and a daemon is signed using a ticket that can be verified by monitors, +OSDs, and metadata daemons. This ticket is verified by using the secret shared +between the client and the daemon. .. ditaa:: @@ -341,83 +359,93 @@ monitors, OSDs and metadata servers can verify with their shared secret. |<-------------------------------------------| receive response -The protection offered by this authentication is between the Ceph client and the -Ceph server hosts. The authentication is not extended beyond the Ceph client. If -the user accesses the Ceph client from a remote host, Ceph authentication is not +This authentication protects only the connections between Ceph clients and Ceph +daemons. The authentication is not extended beyond the Ceph client. If a user +accesses the Ceph client from a remote host, cephx authentication will not be applied to the connection between the user's host and the client host. +See `Cephx Config Guide`_ for more on configuration details. -For configuration details, see `Cephx Config Guide`_. For user management -details, see `User Management`_. +See `User Management`_ for more on user management. +See :ref:`A Detailed Description of the Cephx Authentication Protocol +` for more on the distinction between authorization and +authentication and for a step-by-step explanation of the setup of ``cephx`` +tickets and session keys. .. index:: architecture; smart daemons and scalability Smart Daemons Enable Hyperscale ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In many clustered architectures, the primary purpose of cluster membership is -so that a centralized interface knows which nodes it can access. Then the -centralized interface provides services to the client through a double -dispatch--which is a **huge** bottleneck at the petabyte-to-exabyte scale. - -Ceph eliminates the bottleneck: Ceph's OSD Daemons AND Ceph Clients are cluster -aware. Like Ceph clients, each Ceph OSD Daemon knows about other Ceph OSD -Daemons in the cluster. This enables Ceph OSD Daemons to interact directly with -other Ceph OSD Daemons and Ceph Monitors. Additionally, it enables Ceph Clients -to interact directly with Ceph OSD Daemons. - -The ability of Ceph Clients, Ceph Monitors and Ceph OSD Daemons to interact with -each other means that Ceph OSD Daemons can utilize the CPU and RAM of the Ceph -nodes to easily perform tasks that would bog down a centralized server. The -ability to leverage this computing power leads to several major benefits: - -#. **OSDs Service Clients Directly:** Since any network device has a limit to - the number of concurrent connections it can support, a centralized system - has a low physical limit at high scales. By enabling Ceph Clients to contact - Ceph OSD Daemons directly, Ceph increases both performance and total system - capacity simultaneously, while removing a single point of failure. 
Ceph - Clients can maintain a session when they need to, and with a particular Ceph - OSD Daemon instead of a centralized server. - -#. **OSD Membership and Status**: Ceph OSD Daemons join a cluster and report - on their status. At the lowest level, the Ceph OSD Daemon status is ``up`` - or ``down`` reflecting whether or not it is running and able to service - Ceph Client requests. If a Ceph OSD Daemon is ``down`` and ``in`` the Ceph - Storage Cluster, this status may indicate the failure of the Ceph OSD - Daemon. If a Ceph OSD Daemon is not running (e.g., it crashes), the Ceph OSD - Daemon cannot notify the Ceph Monitor that it is ``down``. The OSDs - periodically send messages to the Ceph Monitor (``MPGStats`` pre-luminous, - and a new ``MOSDBeacon`` in luminous). If the Ceph Monitor doesn't see that - message after a configurable period of time then it marks the OSD down. - This mechanism is a failsafe, however. Normally, Ceph OSD Daemons will - determine if a neighboring OSD is down and report it to the Ceph Monitor(s). - This assures that Ceph Monitors are lightweight processes. See `Monitoring - OSDs`_ and `Heartbeats`_ for additional details. - -#. **Data Scrubbing:** As part of maintaining data consistency and cleanliness, - Ceph OSD Daemons can scrub objects. That is, Ceph OSD Daemons can compare - their local objects metadata with its replicas stored on other OSDs. Scrubbing - happens on a per-Placement Group base. Scrubbing (usually performed daily) - catches mismatches in size and other metadata. Ceph OSD Daemons also perform deeper - scrubbing by comparing data in objects bit-for-bit with their checksums. - Deep scrubbing (usually performed weekly) finds bad sectors on a drive that - weren't apparent in a light scrub. See `Data Scrubbing`_ for details on - configuring scrubbing. - -#. **Replication:** Like Ceph Clients, Ceph OSD Daemons use the CRUSH - algorithm, but the Ceph OSD Daemon uses it to compute where replicas of - objects should be stored (and for rebalancing). In a typical write scenario, - a client uses the CRUSH algorithm to compute where to store an object, maps - the object to a pool and placement group, then looks at the CRUSH map to - identify the primary OSD for the placement group. - - The client writes the object to the identified placement group in the - primary OSD. Then, the primary OSD with its own copy of the CRUSH map - identifies the secondary and tertiary OSDs for replication purposes, and - replicates the object to the appropriate placement groups in the secondary - and tertiary OSDs (as many OSDs as additional replicas), and responds to the - client once it has confirmed the object was stored successfully. +A feature of many storage clusters is a centralized interface that keeps track +of the nodes that clients are permitted to access. Such centralized +architectures provide services to clients by means of a double dispatch. At the +petabyte-to-exabyte scale, such double dispatches are a significant +bottleneck. + +Ceph obviates this bottleneck: Ceph's OSD Daemons AND Ceph clients are +cluster-aware. Like Ceph clients, each Ceph OSD Daemon is aware of other Ceph +OSD Daemons in the cluster. This enables Ceph OSD Daemons to interact directly +with other Ceph OSD Daemons and to interact directly with Ceph Monitors. Being +cluster-aware makes it possible for Ceph clients to interact directly with Ceph +OSD Daemons. 
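As an illustration of this shared cluster awareness, the membership
information that clients and daemons rely on can be inspected at any time with
standard CLI commands (shown here only as an example):

.. prompt:: bash #

   ceph mon dump
   ceph osd tree

``ceph mon dump`` prints the monitor map, and ``ceph osd tree`` lists the OSDs
known to the cluster together with their ``up``/``down`` status. This is the
same cluster-topology information that Ceph clients and Ceph OSD Daemons
consult when they contact one another directly.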
+ +Because Ceph clients, Ceph monitors, and Ceph OSD daemons interact with one +another directly, Ceph OSD daemons can make use of the aggregate CPU and RAM +resources of the nodes in the Ceph cluster. This means that a Ceph cluster can +easily perform tasks that a cluster with a centralized interface would struggle +to perform. The ability of Ceph nodes to make use of the computing power of +the greater cluster provides several benefits: + +#. **OSDs Service Clients Directly:** Network devices can support only a + limited number of concurrent connections. Because Ceph clients contact + Ceph OSD daemons directly without first connecting to a central interface, + Ceph enjoys improved perfomance and increased system capacity relative to + storage redundancy strategies that include a central interface. Ceph clients + maintain sessions only when needed, and maintain those sessions with only + particular Ceph OSD daemons, not with a centralized interface. + +#. **OSD Membership and Status**: When Ceph OSD Daemons join a cluster, they + report their status. At the lowest level, the Ceph OSD Daemon status is + ``up`` or ``down``: this reflects whether the Ceph OSD daemon is running and + able to service Ceph Client requests. If a Ceph OSD Daemon is ``down`` and + ``in`` the Ceph Storage Cluster, this status may indicate the failure of the + Ceph OSD Daemon. If a Ceph OSD Daemon is not running because it has crashed, + the Ceph OSD Daemon cannot notify the Ceph Monitor that it is ``down``. The + OSDs periodically send messages to the Ceph Monitor (in releases prior to + Luminous, this was done by means of ``MPGStats``, and beginning with the + Luminous release, this has been done with ``MOSDBeacon``). If the Ceph + Monitors receive no such message after a configurable period of time, + then they mark the OSD ``down``. This mechanism is a failsafe, however. + Normally, Ceph OSD Daemons determine if a neighboring OSD is ``down`` and + report it to the Ceph Monitors. This contributes to making Ceph Monitors + lightweight processes. See `Monitoring OSDs`_ and `Heartbeats`_ for + additional details. + +#. **Data Scrubbing:** To maintain data consistency, Ceph OSD Daemons scrub + RADOS objects. Ceph OSD Daemons compare the metadata of their own local + objects against the metadata of the replicas of those objects, which are + stored on other OSDs. Scrubbing occurs on a per-Placement-Group basis, finds + mismatches in object size and finds metadata mismatches, and is usually + performed daily. Ceph OSD Daemons perform deeper scrubbing by comparing the + data in objects, bit-for-bit, against their checksums. Deep scrubbing finds + bad sectors on drives that are not detectable with light scrubs. See `Data + Scrubbing`_ for details on configuring scrubbing. + +#. **Replication:** Data replication involves a collaboration between Ceph + Clients and Ceph OSD Daemons. Ceph OSD Daemons use the CRUSH algorithm to + determine the storage location of object replicas. Ceph clients use the + CRUSH algorithm to determine the storage location of an object, then the + object is mapped to a pool and to a placement group, and then the client + consults the CRUSH map to identify the placement group's primary OSD. + + After identifying the target placement group, the client writes the object + to the identified placement group's primary OSD. 
The primary OSD then + consults its own copy of the CRUSH map to identify secondary and tertiary + OSDS, replicates the object to the placement groups in those secondary and + tertiary OSDs, confirms that the object was stored successfully in the + secondary and tertiary OSDs, and reports to the client that the object + was stored successfully. .. ditaa:: @@ -444,19 +472,18 @@ ability to leverage this computing power leads to several major benefits: | | | | +---------------+ +---------------+ -With the ability to perform data replication, Ceph OSD Daemons relieve Ceph -clients from that duty, while ensuring high data availability and data safety. - +By performing this act of data replication, Ceph OSD Daemons relieve Ceph +clients of the burden of replicating data. Dynamic Cluster Management -------------------------- In the `Scalability and High Availability`_ section, we explained how Ceph uses -CRUSH, cluster awareness and intelligent daemons to scale and maintain high +CRUSH, cluster topology, and intelligent daemons to scale and maintain high availability. Key to Ceph's design is the autonomous, self-healing, and intelligent Ceph OSD Daemon. Let's take a deeper look at how CRUSH works to -enable modern cloud storage infrastructures to place data, rebalance the cluster -and recover from faults dynamically. +enable modern cloud storage infrastructures to place data, rebalance the +cluster, and adaptively place and balance data and recover from faults. .. index:: architecture; pools @@ -465,10 +492,11 @@ About Pools The Ceph storage system supports the notion of 'Pools', which are logical partitions for storing objects. - -Ceph Clients retrieve a `Cluster Map`_ from a Ceph Monitor, and write objects to -pools. The pool's ``size`` or number of replicas, the CRUSH rule and the -number of placement groups determine how Ceph will place the data. + +Ceph Clients retrieve a `Cluster Map`_ from a Ceph Monitor, and write RADOS +objects to pools. The way that Ceph places the data in the pools is determined +by the pool's ``size`` or number of replicas, the CRUSH rule, and the number of +placement groups in the pool. .. ditaa:: @@ -501,20 +529,23 @@ See `Set Pool Values`_ for details. Mapping PGs to OSDs ~~~~~~~~~~~~~~~~~~~ -Each pool has a number of placement groups. CRUSH maps PGs to OSDs dynamically. -When a Ceph Client stores objects, CRUSH will map each object to a placement -group. - -Mapping objects to placement groups creates a layer of indirection between the -Ceph OSD Daemon and the Ceph Client. The Ceph Storage Cluster must be able to -grow (or shrink) and rebalance where it stores objects dynamically. If the Ceph -Client "knew" which Ceph OSD Daemon had which object, that would create a tight -coupling between the Ceph Client and the Ceph OSD Daemon. Instead, the CRUSH -algorithm maps each object to a placement group and then maps each placement -group to one or more Ceph OSD Daemons. This layer of indirection allows Ceph to -rebalance dynamically when new Ceph OSD Daemons and the underlying OSD devices -come online. The following diagram depicts how CRUSH maps objects to placement -groups, and placement groups to OSDs. +Each pool has a number of placement groups (PGs) within it. CRUSH dynamically +maps PGs to OSDs. When a Ceph Client stores objects, CRUSH maps each RADOS +object to a PG. + +This mapping of RADOS objects to PGs implements an abstraction and indirection +layer between Ceph OSD Daemons and Ceph Clients. 
The Ceph Storage Cluster must +be able to grow (or shrink) and redistribute data adaptively when the internal +topology changes. + +If the Ceph Client "knew" which Ceph OSD Daemons were storing which objects, a +tight coupling would exist between the Ceph Client and the Ceph OSD Daemon. +But Ceph avoids any such tight coupling. Instead, the CRUSH algorithm maps each +RADOS object to a placement group and then maps each placement group to one or +more Ceph OSD Daemons. This "layer of indirection" allows Ceph to rebalance +dynamically when new Ceph OSD Daemons and their underlying OSD devices come +online. The following diagram shows how the CRUSH algorithm maps objects to +placement groups, and how it maps placement groups to OSDs. .. ditaa:: @@ -540,44 +571,45 @@ groups, and placement groups to OSDs. | | | | | | | | \----------/ \----------/ \----------/ \----------/ -With a copy of the cluster map and the CRUSH algorithm, the client can compute -exactly which OSD to use when reading or writing a particular object. +The client uses its copy of the cluster map and the CRUSH algorithm to compute +precisely which OSD it will use when reading or writing a particular object. .. index:: architecture; calculating PG IDs Calculating PG IDs ~~~~~~~~~~~~~~~~~~ -When a Ceph Client binds to a Ceph Monitor, it retrieves the latest copy of the -`Cluster Map`_. With the cluster map, the client knows about all of the monitors, -OSDs, and metadata servers in the cluster. **However, it doesn't know anything -about object locations.** - -.. epigraph:: - - Object locations get computed. - - -The only input required by the client is the object ID and the pool. -It's simple: Ceph stores data in named pools (e.g., "liverpool"). When a client -wants to store a named object (e.g., "john," "paul," "george," "ringo", etc.) -it calculates a placement group using the object name, a hash code, the -number of PGs in the pool and the pool name. Ceph clients use the following -steps to compute PG IDs. - -#. The client inputs the pool name and the object ID. (e.g., pool = "liverpool" - and object-id = "john") -#. Ceph takes the object ID and hashes it. -#. Ceph calculates the hash modulo the number of PGs. (e.g., ``58``) to get - a PG ID. -#. Ceph gets the pool ID given the pool name (e.g., "liverpool" = ``4``) -#. Ceph prepends the pool ID to the PG ID (e.g., ``4.58``). - -Computing object locations is much faster than performing object location query -over a chatty session. The :abbr:`CRUSH (Controlled Replication Under Scalable -Hashing)` algorithm allows a client to compute where objects *should* be stored, -and enables the client to contact the primary OSD to store or retrieve the -objects. +When a Ceph Client binds to a Ceph Monitor, it retrieves the latest version of +the `Cluster Map`_. When a client has been equipped with a copy of the cluster +map, it is aware of all the monitors, OSDs, and metadata servers in the +cluster. **However, even equipped with a copy of the latest version of the +cluster map, the client doesn't know anything about object locations.** + +**Object locations must be computed.** + +The client requies only the object ID and the name of the pool in order to +compute the object location. + +Ceph stores data in named pools (for example, "liverpool"). When a client +stores a named object (for example, "john", "paul", "george", or "ringo") it +calculates a placement group by using the object name, a hash code, the number +of PGs in the pool, and the pool name. 
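Before stepping through the calculation, note that a running cluster can
report the computed location of any object directly. The pool and object
names below are examples only:

.. prompt:: bash #

   ceph osd map liverpool john

The output includes the pool ID, the placement group to which the object
hashes, and the OSDs in that placement group's acting set, which makes it
easy to confirm the steps described next.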
Ceph clients use the following steps to +compute PG IDs. + +#. The client inputs the pool name and the object ID. (for example: pool = + "liverpool" and object-id = "john") +#. Ceph hashes the object ID. +#. Ceph calculates the hash, modulo the number of PGs (for example: ``58``), to + get a PG ID. +#. Ceph uses the pool name to retrieve the pool ID: (for example: "liverpool" = + ``4``) +#. Ceph prepends the pool ID to the PG ID (for example: ``4.58``). + +It is much faster to compute object locations than to perform object location +query over a chatty session. The :abbr:`CRUSH (Controlled Replication Under +Scalable Hashing)` algorithm allows a client to compute where objects are +expected to be stored, and enables the client to contact the primary OSD to +store or retrieve the objects. .. index:: architecture; PG Peering @@ -585,46 +617,51 @@ Peering and Sets ~~~~~~~~~~~~~~~~ In previous sections, we noted that Ceph OSD Daemons check each other's -heartbeats and report back to the Ceph Monitor. Another thing Ceph OSD daemons -do is called 'peering', which is the process of bringing all of the OSDs that -store a Placement Group (PG) into agreement about the state of all of the -objects (and their metadata) in that PG. In fact, Ceph OSD Daemons `Report -Peering Failure`_ to the Ceph Monitors. Peering issues usually resolve -themselves; however, if the problem persists, you may need to refer to the -`Troubleshooting Peering Failure`_ section. +heartbeats and report back to Ceph Monitors. Ceph OSD daemons also 'peer', +which is the process of bringing all of the OSDs that store a Placement Group +(PG) into agreement about the state of all of the RADOS objects (and their +metadata) in that PG. Ceph OSD Daemons `Report Peering Failure`_ to the Ceph +Monitors. Peering issues usually resolve themselves; however, if the problem +persists, you may need to refer to the `Troubleshooting Peering Failure`_ +section. -.. Note:: Agreeing on the state does not mean that the PGs have the latest contents. +.. Note:: PGs that agree on the state of the cluster do not necessarily have + the current data yet. The Ceph Storage Cluster was designed to store at least two copies of an object -(i.e., ``size = 2``), which is the minimum requirement for data safety. For high -availability, a Ceph Storage Cluster should store more than two copies of an object -(e.g., ``size = 3`` and ``min size = 2``) so that it can continue to run in a -``degraded`` state while maintaining data safety. - -Referring back to the diagram in `Smart Daemons Enable Hyperscale`_, we do not -name the Ceph OSD Daemons specifically (e.g., ``osd.0``, ``osd.1``, etc.), but -rather refer to them as *Primary*, *Secondary*, and so forth. By convention, -the *Primary* is the first OSD in the *Acting Set*, and is responsible for -coordinating the peering process for each placement group where it acts as -the *Primary*, and is the **ONLY** OSD that will accept client-initiated -writes to objects for a given placement group where it acts as the *Primary*. - -When a series of OSDs are responsible for a placement group, that series of -OSDs, we refer to them as an *Acting Set*. An *Acting Set* may refer to the Ceph -OSD Daemons that are currently responsible for the placement group, or the Ceph -OSD Daemons that were responsible for a particular placement group as of some +(that is, ``size = 2``), which is the minimum requirement for data safety. 
For +high availability, a Ceph Storage Cluster should store more than two copies of +an object (that is, ``size = 3`` and ``min size = 2``) so that it can continue +to run in a ``degraded`` state while maintaining data safety. + +.. warning:: Although we say here that R2 (replication with two copies) is the + minimum requirement for data safety, R3 (replication with three copies) is + recommended. On a long enough timeline, data stored with an R2 strategy will + be lost. + +As explained in the diagram in `Smart Daemons Enable Hyperscale`_, we do not +name the Ceph OSD Daemons specifically (for example, ``osd.0``, ``osd.1``, +etc.), but rather refer to them as *Primary*, *Secondary*, and so forth. By +convention, the *Primary* is the first OSD in the *Acting Set*, and is +responsible for orchestrating the peering process for each placement group +where it acts as the *Primary*. The *Primary* is the **ONLY** OSD in a given +placement group that accepts client-initiated writes to objects. + +The set of OSDs that is responsible for a placement group is called the +*Acting Set*. The term "*Acting Set*" can refer either to the Ceph OSD Daemons +that are currently responsible for the placement group, or to the Ceph OSD +Daemons that were responsible for a particular placement group as of some epoch. -The Ceph OSD daemons that are part of an *Acting Set* may not always be ``up``. -When an OSD in the *Acting Set* is ``up``, it is part of the *Up Set*. The *Up -Set* is an important distinction, because Ceph can remap PGs to other Ceph OSD -Daemons when an OSD fails. - -.. note:: In an *Acting Set* for a PG containing ``osd.25``, ``osd.32`` and - ``osd.61``, the first OSD, ``osd.25``, is the *Primary*. If that OSD fails, - the Secondary, ``osd.32``, becomes the *Primary*, and ``osd.25`` will be - removed from the *Up Set*. +The Ceph OSD daemons that are part of an *Acting Set* might not always be +``up``. When an OSD in the *Acting Set* is ``up``, it is part of the *Up Set*. +The *Up Set* is an important distinction, because Ceph can remap PGs to other +Ceph OSD Daemons when an OSD fails. +.. note:: Consider a hypothetical *Acting Set* for a PG that contains + ``osd.25``, ``osd.32`` and ``osd.61``. The first OSD (``osd.25``), is the + *Primary*. If that OSD fails, the Secondary (``osd.32``), becomes the + *Primary*, and ``osd.25`` is removed from the *Up Set*. .. index:: architecture; Rebalancing @@ -1469,11 +1506,11 @@ Ceph Clients Ceph Clients include a number of service interfaces. These include: -- **Block Devices:** The :term:`Ceph Block Device` (a.k.a., RBD) service - provides resizable, thin-provisioned block devices with snapshotting and - cloning. Ceph stripes a block device across the cluster for high - performance. Ceph supports both kernel objects (KO) and a QEMU hypervisor - that uses ``librbd`` directly--avoiding the kernel object overhead for +- **Block Devices:** The :term:`Ceph Block Device` (a.k.a., RBD) service + provides resizable, thin-provisioned block devices that can be snapshotted + and cloned. Ceph stripes a block device across the cluster for high + performance. Ceph supports both kernel objects (KO) and a QEMU hypervisor + that uses ``librbd`` directly--avoiding the kernel object overhead for virtualized systems. 
- **Object Storage:** The :term:`Ceph Object Storage` (a.k.a., RGW) service diff --git a/ceph/doc/ceph-volume/lvm/activate.rst b/ceph/doc/ceph-volume/lvm/activate.rst index d5129def1..fe34ecb71 100644 --- a/ceph/doc/ceph-volume/lvm/activate.rst +++ b/ceph/doc/ceph-volume/lvm/activate.rst @@ -3,18 +3,20 @@ ``activate`` ============ -Once :ref:`ceph-volume-lvm-prepare` is completed, and all the various steps -that entails are done, the volume is ready to get "activated". +After :ref:`ceph-volume-lvm-prepare` has completed its run, the volume can be +activated. -This activation process enables a systemd unit that persists the OSD ID and its -UUID (also called ``fsid`` in Ceph CLI tools), so that at boot time it can -understand what OSD is enabled and needs to be mounted. +Activating the volume involves enabling a ``systemd`` unit that persists the +``OSD ID`` and its ``UUID`` (which is also called the ``fsid`` in the Ceph CLI +tools). After this information has been persisted, the cluster can determine +which OSD is enabled and must be mounted. -.. note:: The execution of this call is fully idempotent, and there is no - side-effects when running multiple times +.. note:: The execution of this call is fully idempotent. This means that the + call can be executed multiple times without changing the result of its first + successful execution. -For OSDs deployed by cephadm, please refer to :ref:`cephadm-osd-activate` -instead. +For information about OSDs deployed by cephadm, refer to +:ref:`cephadm-osd-activate`. New OSDs -------- diff --git a/ceph/doc/cephadm/host-management.rst b/ceph/doc/cephadm/host-management.rst index 189e16abe..4b964c5f4 100644 --- a/ceph/doc/cephadm/host-management.rst +++ b/ceph/doc/cephadm/host-management.rst @@ -101,8 +101,19 @@ To drain all daemons from a host, run a command of the following form: ceph orch host drain ** -The ``_no_schedule`` label will be applied to the host. See -:ref:`cephadm-special-host-labels`. +The ``_no_schedule`` and ``_no_conf_keyring`` labels will be applied to the +host. See :ref:`cephadm-special-host-labels`. + +If you only want to drain daemons but leave managed ceph conf and keyring +files on the host, you may pass the ``--keep-conf-keyring`` flag to the +drain command. + +.. prompt:: bash # + + ceph orch host drain ** --keep-conf-keyring + +This will apply the ``_no_schedule`` label to the host but not the +``_no_conf_keyring`` label. All OSDs on the host will be scheduled to be removed. You can check the progress of the OSD removal operation with the following command: @@ -112,6 +123,14 @@ All OSDs on the host will be scheduled to be removed. You can check the progress See :ref:`cephadm-osd-removal` for more details about OSD removal. +The ``orch host drain`` command also supports a ``--zap-osd-devices`` +flag. Setting this flag while draining a host will cause cephadm to zap +the devices of the OSDs it is removing as part of the drain process + +.. prompt:: bash # + + ceph orch host drain ** --zap-osd-devices + Use the following command to determine whether any daemons are still on the host: @@ -183,6 +202,12 @@ The following host labels have a special meaning to cephadm. All start with ``_ an existing host that already contains Ceph daemons, it will cause cephadm to move those daemons elsewhere (except OSDs, which are not removed automatically). +* ``_no_conf_keyring``: *Do not deploy config files or keyrings on this host*. 
+ + This label is effectively the same as ``_no_schedule`` but instead of working for + daemons it works for client keyrings and ceph conf files that are being managed + by cephadm + * ``_no_autotune_memory``: *Do not autotune memory on this host*. This label will prevent daemon memory from being tuned even when the @@ -290,7 +315,7 @@ create a new CRUSH host located in the specified hierarchy. .. note:: The ``location`` attribute will be only affect the initial CRUSH location. Subsequent - changes of the ``location`` property will be ignored. Also, removing a host will no remove + changes of the ``location`` property will be ignored. Also, removing a host will not remove any CRUSH buckets. See also :ref:`crush_map_default_types`. @@ -505,7 +530,23 @@ There are two ways to customize this configuration for your environment: manually distributed to the mgr data directory (``/var/lib/ceph//mgr.`` on the host, visible at ``/var/lib/ceph/mgr/ceph-`` from inside the container). - + +Setting up CA signed keys for the cluster +----------------------------------------- + +Cephadm also supports using CA signed keys for SSH authentication +across cluster nodes. In this setup, instead of needing a private +key and public key, we instead need a private key and certificate +created by signing that private key with a CA key. For more info +on setting up nodes for authentication using a CA signed key, see +:ref:`cephadm-bootstrap-ca-signed-keys`. Once you have your private +key and signed cert, they can be set up for cephadm to use by running: + +.. prompt:: bash # + + ceph config-key set mgr/cephadm/ssh_identity_key -i + ceph config-key set mgr/cephadm/ssh_identity_cert -i + .. _cephadm-fqdn: Fully qualified domain names vs bare host names diff --git a/ceph/doc/cephadm/install.rst b/ceph/doc/cephadm/install.rst index 58c997fda..b1aa736e2 100644 --- a/ceph/doc/cephadm/install.rst +++ b/ceph/doc/cephadm/install.rst @@ -50,8 +50,8 @@ There are two ways to install ``cephadm``: distribution-specific installations ----------------------------------- -Some Linux distributions may already include up-to-date Ceph packages. In that -case, you can install cephadm directly. For example: +Some Linux distributions may already include up-to-date Ceph packages. In +that case, you can install cephadm directly. For example: In Ubuntu: @@ -87,7 +87,7 @@ curl-based installation * First, determine what version of Ceph you will need. You can use the releases page to find the `latest active releases `_. - For example, we might look at that page and find that ``17.2.6`` is the latest + For example, we might look at that page and find that ``18.2.0`` is the latest active release. * Use ``curl`` to fetch a build of cephadm for that release. @@ -95,7 +95,7 @@ curl-based installation .. prompt:: bash # :substitutions: - CEPH_RELEASE=17.2.6 # replace this with the active release + CEPH_RELEASE=18.2.0 # replace this with the active release curl --silent --remote-name --location https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm Ensure the ``cephadm`` file is executable: @@ -121,28 +121,41 @@ curl-based installation python3.8 ./cephadm -* Although the standalone cephadm is sufficient to get a cluster started, it is - convenient to have the ``cephadm`` command installed on the host. To install - the packages that provide the ``cephadm`` command, run the following - commands: +.. _cephadm_update: - .. 
prompt:: bash # - :substitutions: +update cephadm +-------------- - ./cephadm add-repo --release |stable-release| - ./cephadm install +The cephadm binary can be used to bootstrap a cluster and for a variety +of other management and debugging tasks. The Ceph team strongly recommends +using an actively supported version of cephadm. Additionally, although +the standalone cephadm is sufficient to get a cluster started, it is +convenient to have the ``cephadm`` command installed on the host. Older or LTS +distros may also have ``cephadm`` packages that are out-of-date and +running the commands below can help install a more recent version +from the Ceph project's repositories. - Confirm that ``cephadm`` is now in your PATH by running ``which``: +To install the packages provided by the Ceph project that provide the +``cephadm`` command, run the following commands: - .. prompt:: bash # +.. prompt:: bash # + :substitutions: - which cephadm + ./cephadm add-repo --release |stable-release| + ./cephadm install - A successful ``which cephadm`` command will return this: +Confirm that ``cephadm`` is now in your PATH by running ``which`` or +``command -v``: - .. code-block:: bash +.. prompt:: bash # - /usr/sbin/cephadm + which cephadm + +A successful ``which cephadm`` command will return this: + +.. code-block:: bash + + /usr/sbin/cephadm Bootstrap a new cluster ======================= @@ -157,6 +170,9 @@ cluster's first "monitor daemon", and that monitor daemon needs an IP address. You must pass the IP address of the Ceph cluster's first host to the ``ceph bootstrap`` command, so you'll need to know the IP address of that host. +.. important:: ``ssh`` must be installed and running in order for the + bootstrapping procedure to succeed. + .. note:: If there are multiple networks and interfaces, be sure to choose one that will be accessible by any host accessing the Ceph cluster. @@ -184,6 +200,8 @@ This command will: with this label will (also) get a copy of ``/etc/ceph/ceph.conf`` and ``/etc/ceph/ceph.client.admin.keyring``. +.. _cephadm-bootstrap-further-info: + Further information about cephadm bootstrap ------------------------------------------- @@ -303,18 +321,21 @@ its status with: Adding Hosts ============ -Next, add all hosts to the cluster by following :ref:`cephadm-adding-hosts`. +Add all hosts to the cluster by following the instructions in +:ref:`cephadm-adding-hosts`. -By default, a ``ceph.conf`` file and a copy of the ``client.admin`` keyring -are maintained in ``/etc/ceph`` on all hosts with the ``_admin`` label, which is initially -applied only to the bootstrap host. We usually recommend that one or more other hosts be -given the ``_admin`` label so that the Ceph CLI (e.g., via ``cephadm shell``) is easily -accessible on multiple hosts. To add the ``_admin`` label to additional host(s): +By default, a ``ceph.conf`` file and a copy of the ``client.admin`` keyring are +maintained in ``/etc/ceph`` on all hosts that have the ``_admin`` label. This +label is initially applied only to the bootstrap host. We usually recommend +that one or more other hosts be given the ``_admin`` label so that the Ceph CLI +(for example, via ``cephadm shell``) is easily accessible on multiple hosts. To add +the ``_admin`` label to additional host(s), run a command of the following form: .. prompt:: bash # ceph orch host label add ** _admin + Adding additional MONs ====================== @@ -454,3 +475,104 @@ have access to all hosts that you plan to add to the cluster. 
cephadm --image **:5000/ceph/ceph bootstrap --mon-ip ** .. _cluster network: ../rados/configuration/network-config-ref#cluster-network + +.. _cephadm-bootstrap-custom-ssh-keys: + +Deployment with custom SSH keys +------------------------------- + +Bootstrap allows users to create their own private/public SSH key pair +rather than having cephadm generate them automatically. + +To use custom SSH keys, pass the ``--ssh-private-key`` and ``--ssh-public-key`` +fields to bootstrap. Both parameters require a path to the file where the +keys are stored: + +.. prompt:: bash # + + cephadm bootstrap --mon-ip --ssh-private-key --ssh-public-key + +This setup allows users to use a key that has already been distributed to hosts +the user wants in the cluster before bootstrap. + +.. note:: In order for cephadm to connect to other hosts you'd like to add + to the cluster, make sure the public key of the key pair provided is set up + as an authorized key for the ssh user being used, typically root. If you'd + like more info on using a non-root user as the ssh user, see :ref:`cephadm-bootstrap-further-info` + +.. _cephadm-bootstrap-ca-signed-keys: + +Deployment with CA signed SSH keys +---------------------------------- + +As an alternative to standard public key authentication, cephadm also supports +deployment using CA signed keys. Before bootstrapping it's recommended to set up +the CA public key as a trusted CA key on hosts you'd like to eventually add to +the cluster. For example: + +.. prompt:: bash + + # we will act as our own CA, therefore we'll need to make a CA key + [root@host1 ~]# ssh-keygen -t rsa -f ca-key -N "" + + # make the ca key trusted on the host we've generated it on + # this requires adding in a line in our /etc/sshd_config + # to mark this key as trusted + [root@host1 ~]# cp ca-key.pub /etc/ssh + [root@host1 ~]# vi /etc/ssh/sshd_config + [root@host1 ~]# cat /etc/ssh/sshd_config | grep ca-key + TrustedUserCAKeys /etc/ssh/ca-key.pub + # now restart sshd so it picks up the config change + [root@host1 ~]# systemctl restart sshd + + # now, on all other hosts we want in the cluster, also install the CA key + [root@host1 ~]# scp /etc/ssh/ca-key.pub host2:/etc/ssh/ + + # on other hosts, make the same changes to the sshd_config + [root@host2 ~]# vi /etc/ssh/sshd_config + [root@host2 ~]# cat /etc/ssh/sshd_config | grep ca-key + TrustedUserCAKeys /etc/ssh/ca-key.pub + # and restart sshd so it picks up the config change + [root@host2 ~]# systemctl restart sshd + +Once the CA key has been installed and marked as a trusted key, you are ready +to use a private key/CA signed cert combination for SSH. Continuing with our +current example, we will create a new key-pair for for host access and then +sign it with our CA key + +.. prompt:: bash + + # make a new key pair + [root@host1 ~]# ssh-keygen -t rsa -f cephadm-ssh-key -N "" + # sign the private key. This will create a new cephadm-ssh-key-cert.pub + # note here we're using user "root". If you'd like to use a non-root + # user the arguments to the -I and -n params would need to be adjusted + # Additionally, note the -V param indicates how long until the cert + # this creates will expire + [root@host1 ~]# ssh-keygen -s ca-key -I user_root -n root -V +52w cephadm-ssh-key + [root@host1 ~]# ls + ca-key ca-key.pub cephadm-ssh-key cephadm-ssh-key-cert.pub cephadm-ssh-key.pub + + # verify our signed key is working. 
To do this, make sure the generated private + # key ("cephadm-ssh-key" in our example) and the newly signed cert are stored + # in the same directory. Then try to ssh using the private key + [root@host1 ~]# ssh -i cephadm-ssh-key host2 + +Once you have your private key and corresponding CA signed cert and have tested +SSH authentication using that key works, you can pass those keys to bootstrap +in order to have cephadm use them for SSHing between cluster hosts + +.. prompt:: bash + + [root@host1 ~]# cephadm bootstrap --mon-ip --ssh-private-key cephadm-ssh-key --ssh-signed-cert cephadm-ssh-key-cert.pub + +Note that this setup does not require installing the corresponding public key +from the private key passed to bootstrap on other nodes. In fact, cephadm will +reject the ``--ssh-public-key`` argument when passed along with ``--ssh-signed-cert``. +Not because having the public key breaks anything, but because it is not at all needed +for this setup and it helps bootstrap differentiate if the user wants the CA signed +keys setup or standard pubkey encryption. What this means is, SSH key rotation +would simply be a matter of getting another key signed by the same CA and providing +cephadm with the new private key and signed cert. No additional distribution of +keys to cluster nodes is needed after the initial setup of the CA key as a trusted key, +no matter how many new private key/signed cert pairs are rotated in. diff --git a/ceph/doc/cephadm/services/index.rst b/ceph/doc/cephadm/services/index.rst index 6596a8acd..82f83bfac 100644 --- a/ceph/doc/cephadm/services/index.rst +++ b/ceph/doc/cephadm/services/index.rst @@ -541,13 +541,60 @@ a spec like which would cause each mon daemon to be deployed with `--cpus=2`. +There are two ways to express arguments in the ``extra_container_args`` list. +To start, an item in the list can be a string. When passing an argument +as a string and the string contains spaces, Cephadm will automatically split it +into multiple arguments. For example, ``--cpus 2`` would become ``["--cpus", +"2"]`` when processed. Example: + +.. code-block:: yaml + + service_type: mon + service_name: mon + placement: + hosts: + - host1 + - host2 + - host3 + extra_container_args: + - "--cpus 2" + +As an alternative, an item in the list can be an object (mapping) containing +the required key "argument" and an optional key "split". The value associated +with the ``argument`` key must be a single string. The value associated with +the ``split`` key is a boolean value. The ``split`` key explicitly controls if +spaces in the argument value cause the value to be split into multiple +arguments. If ``split`` is true then Cephadm will automatically split the value +into multiple arguments. If ``split`` is false then spaces in the value will +be retained in the argument. The default, when ``split`` is not provided, is +false. Examples: + +.. 
code-block:: yaml + + service_type: mon + service_name: mon + placement: + hosts: + - tiebreaker + extra_container_args: + # No spaces, always treated as a single argument + - argument: "--timout=3000" + # Splitting explicitly disabled, one single argument + - argument: "--annotation=com.example.name=my favorite mon" + split: false + # Splitting explicitly enabled, will become two arguments + - argument: "--cpuset-cpus 1-3,7-11" + split: true + # Splitting implicitly disabled, one single argument + - argument: "--annotation=com.example.note=a simple example" + Mounting Files with Extra Container Arguments --------------------------------------------- A common use case for extra container arguments is to mount additional -files within the container. However, some intuitive formats for doing -so can cause deployment to fail (see https://tracker.ceph.com/issues/57338). -The recommended syntax for mounting a file with extra container arguments is: +files within the container. Older versions of Ceph did not support spaces +in arguments and therefore the examples below apply to the widest range +of Ceph versions. .. code-block:: yaml @@ -587,6 +634,55 @@ the node-exporter service , one could apply a service spec like extra_entrypoint_args: - "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector2" +There are two ways to express arguments in the ``extra_entrypoint_args`` list. +To start, an item in the list can be a string. When passing an argument as a +string and the string contains spaces, cephadm will automatically split it into +multiple arguments. For example, ``--debug_ms 10`` would become +``["--debug_ms", "10"]`` when processed. Example: + +.. code-block:: yaml + + service_type: mon + service_name: mon + placement: + hosts: + - host1 + - host2 + - host3 + extra_entrypoint_args: + - "--debug_ms 2" + +As an alternative, an item in the list can be an object (mapping) containing +the required key "argument" and an optional key "split". The value associated +with the ``argument`` key must be a single string. The value associated with +the ``split`` key is a boolean value. The ``split`` key explicitly controls if +spaces in the argument value cause the value to be split into multiple +arguments. If ``split`` is true then cephadm will automatically split the value +into multiple arguments. If ``split`` is false then spaces in the value will +be retained in the argument. The default, when ``split`` is not provided, is +false. Examples: + +.. code-block:: yaml + + # An theoretical data migration service + service_type: pretend + service_name: imagine1 + placement: + hosts: + - host1 + extra_entrypoint_args: + # No spaces, always treated as a single argument + - argument: "--timout=30m" + # Splitting explicitly disabled, one single argument + - argument: "--import=/mnt/usb/My Documents" + split: false + # Splitting explicitly enabled, will become two arguments + - argument: "--tag documents" + split: true + # Splitting implicitly disabled, one single argument + - argument: "--title=Imported Documents" + + Custom Config Files =================== diff --git a/ceph/doc/cephadm/services/mds.rst b/ceph/doc/cephadm/services/mds.rst index 949a0fa5d..96b7c2dda 100644 --- a/ceph/doc/cephadm/services/mds.rst +++ b/ceph/doc/cephadm/services/mds.rst @@ -20,7 +20,18 @@ For example: ceph fs volume create --placement="" where ``fs_name`` is the name of the CephFS and ``placement`` is a -:ref:`orchestrator-cli-placement-spec`. +:ref:`orchestrator-cli-placement-spec`. 
For example, to place +MDS daemons for the new ``foo`` volume on hosts labeled with ``mds``: + +.. prompt:: bash # + + ceph fs volume create foo --placement="label:mds" + +You can also update the placement after-the-fact via: + +.. prompt:: bash # + + ceph orch apply mds foo 'mds-[012]' For manually deploying MDS daemons, use this specification: @@ -30,6 +41,7 @@ For manually deploying MDS daemons, use this specification: service_id: fs_name placement: count: 3 + label: mds The specification can then be applied using: diff --git a/ceph/doc/cephadm/services/mgr.rst b/ceph/doc/cephadm/services/mgr.rst index 133a00d77..9baff3a7a 100644 --- a/ceph/doc/cephadm/services/mgr.rst +++ b/ceph/doc/cephadm/services/mgr.rst @@ -4,8 +4,8 @@ MGR Service =========== -The cephadm MGR service is hosting different modules, like the :ref:`mgr-dashboard` -and the cephadm manager module. +The cephadm MGR service hosts multiple modules. These include the +:ref:`mgr-dashboard` and the cephadm manager module. .. _cephadm-mgr-networks: diff --git a/ceph/doc/cephadm/services/nfs.rst b/ceph/doc/cephadm/services/nfs.rst index 0e263275d..2f12c5916 100644 --- a/ceph/doc/cephadm/services/nfs.rst +++ b/ceph/doc/cephadm/services/nfs.rst @@ -161,6 +161,53 @@ that will tell it to bind to that specific IP. Note that in these setups, one should make sure to include ``count: 1`` in the nfs placement, as it's only possible for one nfs daemon to bind to the virtual IP. +NFS with HAProxy Protocol Support +---------------------------------- + +Cephadm supports deploying NFS in High-Availability mode with additional +HAProxy protocol support. This works just like High-availability NFS but also +supports client IP level configuration on NFS Exports. This feature requires +`NFS-Ganesha v5.0`_ or later. + +.. _NFS-Ganesha v5.0: https://github.com/nfs-ganesha/nfs-ganesha/wiki/ReleaseNotes_5 + +To use this mode, you'll either want to set up the service using the nfs module +(see :ref:`nfs-module-cluster-create`) or manually create services with the +extra parameter ``enable_haproxy_protocol`` set to true. Both NFS Service and +Ingress service must have ``enable_haproxy_protocol`` set to the same value. +For example: + +.. code-block:: yaml + + service_type: ingress + service_id: nfs.foo + placement: + count: 1 + hosts: + - host1 + - host2 + - host3 + spec: + backend_service: nfs.foo + monitor_port: 9049 + virtual_ip: 192.168.122.100/24 + enable_haproxy_protocol: true + +.. code-block:: yaml + + service_type: nfs + service_id: foo + placement: + count: 1 + hosts: + - host1 + - host2 + - host3 + spec: + port: 2049 + enable_haproxy_protocol: true + + Further Reading =============== diff --git a/ceph/doc/cephadm/services/osd.rst b/ceph/doc/cephadm/services/osd.rst index 00e414c1b..f62b0f831 100644 --- a/ceph/doc/cephadm/services/osd.rst +++ b/ceph/doc/cephadm/services/osd.rst @@ -15,10 +15,9 @@ To print a list of devices discovered by ``cephadm``, run this command: .. prompt:: bash # - ceph orch device ls [--hostname=...] [--wide] [--refresh] + ceph orch device ls [--hostname=...] [--wide] [--refresh] -Example -:: +Example:: Hostname Path Type Serial Size Health Ident Fault Available srv-01 /dev/sdb hdd 15P0A0YFFRD6 300G Unknown N/A N/A No @@ -44,7 +43,7 @@ enable cephadm's "enhanced device scan" option as follows; .. prompt:: bash # - ceph config set mgr mgr/cephadm/device_enhanced_scan true + ceph config set mgr mgr/cephadm/device_enhanced_scan true .. 
warning:: Although the libstoragemgmt library performs standard SCSI inquiry calls, @@ -173,16 +172,16 @@ will happen without actually creating the OSDs. For example: - .. prompt:: bash # +.. prompt:: bash # - ceph orch apply osd --all-available-devices --dry-run + ceph orch apply osd --all-available-devices --dry-run - :: +:: - NAME HOST DATA DB WAL - all-available-devices node1 /dev/vdb - - - all-available-devices node2 /dev/vdc - - - all-available-devices node3 /dev/vdd - - + NAME HOST DATA DB WAL + all-available-devices node1 /dev/vdb - - + all-available-devices node2 /dev/vdc - - + all-available-devices node3 /dev/vdd - - .. _cephadm-osd-declarative: @@ -197,9 +196,9 @@ command completes will be automatically found and added to the cluster. We will examine the effects of the following command: - .. prompt:: bash # +.. prompt:: bash # - ceph orch apply osd --all-available-devices + ceph orch apply osd --all-available-devices After running the above command: @@ -212,17 +211,17 @@ If you want to avoid this behavior (disable automatic creation of OSD on availab .. prompt:: bash # - ceph orch apply osd --all-available-devices --unmanaged=true + ceph orch apply osd --all-available-devices --unmanaged=true .. note:: - Keep these three facts in mind: + Keep these three facts in mind: - - The default behavior of ``ceph orch apply`` causes cephadm constantly to reconcile. This means that cephadm creates OSDs as soon as new drives are detected. + - The default behavior of ``ceph orch apply`` causes cephadm constantly to reconcile. This means that cephadm creates OSDs as soon as new drives are detected. - - Setting ``unmanaged: True`` disables the creation of OSDs. If ``unmanaged: True`` is set, nothing will happen even if you apply a new OSD service. + - Setting ``unmanaged: True`` disables the creation of OSDs. If ``unmanaged: True`` is set, nothing will happen even if you apply a new OSD service. - - ``ceph orch daemon add`` creates OSDs, but does not add an OSD service. + - ``ceph orch daemon add`` creates OSDs, but does not add an OSD service. * For cephadm, see also :ref:`cephadm-spec-unmanaged`. @@ -250,7 +249,7 @@ Example: Expected output:: - Scheduled OSD(s) for removal + Scheduled OSD(s) for removal OSDs that are not safe to destroy will be rejected. @@ -273,14 +272,14 @@ You can query the state of OSD operation with the following command: .. prompt:: bash # - ceph orch osd rm status + ceph orch osd rm status Expected output:: - OSD_ID HOST STATE PG_COUNT REPLACE FORCE STARTED_AT - 2 cephadm-dev done, waiting for purge 0 True False 2020-07-17 13:01:43.147684 - 3 cephadm-dev draining 17 False True 2020-07-17 13:01:45.162158 - 4 cephadm-dev started 42 False True 2020-07-17 13:01:45.162158 + OSD_ID HOST STATE PG_COUNT REPLACE FORCE STARTED_AT + 2 cephadm-dev done, waiting for purge 0 True False 2020-07-17 13:01:43.147684 + 3 cephadm-dev draining 17 False True 2020-07-17 13:01:45.162158 + 4 cephadm-dev started 42 False True 2020-07-17 13:01:45.162158 When no PGs are left on the OSD, it will be decommissioned and removed from the cluster. @@ -302,11 +301,11 @@ Example: .. prompt:: bash # - ceph orch osd rm stop 4 + ceph orch osd rm stop 4 Expected output:: - Stopped OSD(s) removal + Stopped OSD(s) removal This resets the initial state of the OSD and takes it off the removal queue. 
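To confirm that an OSD has been retained after its removal was stopped, you
can check that nothing remains in the removal queue and that the OSD is still
``up`` and ``in``. The commands below are shown only as an illustration:

.. prompt:: bash #

   ceph orch osd rm status
   ceph osd tree

If no removal operations are pending, ``ceph orch osd rm status`` reports that
no removal operations are in progress, and ``ceph osd tree`` lists the OSD in
its usual position in the CRUSH hierarchy.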
@@ -327,7 +326,7 @@ Example: Expected output:: - Scheduled OSD(s) for replacement + Scheduled OSD(s) for replacement This follows the same procedure as the procedure in the "Remove OSD" section, with one exception: the OSD is not permanently removed from the CRUSH hierarchy, but is @@ -434,10 +433,10 @@ the ``ceph orch ps`` output in the ``MEM LIMIT`` column:: To exclude an OSD from memory autotuning, disable the autotune option for that OSD and also set a specific memory target. For example, - .. prompt:: bash # +.. prompt:: bash # - ceph config set osd.123 osd_memory_target_autotune false - ceph config set osd.123 osd_memory_target 16G + ceph config set osd.123 osd_memory_target_autotune false + ceph config set osd.123 osd_memory_target 16G .. _drivegroups: @@ -500,7 +499,7 @@ Example .. prompt:: bash [monitor.1]# - ceph orch apply -i /path/to/osd_spec.yml --dry-run + ceph orch apply -i /path/to/osd_spec.yml --dry-run @@ -510,9 +509,9 @@ Filters ------- .. note:: - Filters are applied using an `AND` gate by default. This means that a drive - must fulfill all filter criteria in order to get selected. This behavior can - be adjusted by setting ``filter_logic: OR`` in the OSD specification. + Filters are applied using an `AND` gate by default. This means that a drive + must fulfill all filter criteria in order to get selected. This behavior can + be adjusted by setting ``filter_logic: OR`` in the OSD specification. Filters are used to assign disks to groups, using their attributes to group them. @@ -522,7 +521,7 @@ information about the attributes with this command: .. code-block:: bash - ceph-volume inventory + ceph-volume inventory Vendor or Model ^^^^^^^^^^^^^^^ @@ -631,9 +630,9 @@ but want to use only the first two, you could use `limit`: .. code-block:: yaml - data_devices: - vendor: VendorA - limit: 2 + data_devices: + vendor: VendorA + limit: 2 .. note:: `limit` is a last resort and shouldn't be used if it can be avoided. @@ -856,8 +855,8 @@ See :ref:`orchestrator-cli-placement-spec` .. note:: - Assuming each host has a unique disk layout, each OSD - spec needs to have a different service id + Assuming each host has a unique disk layout, each OSD + spec needs to have a different service id Dedicated wal + db @@ -987,7 +986,7 @@ activates all existing OSDs on a host. .. prompt:: bash # - ceph cephadm osd activate ... + ceph cephadm osd activate ... This will scan all existing disks for OSDs and deploy corresponding daemons. diff --git a/ceph/doc/cephadm/services/rgw.rst b/ceph/doc/cephadm/services/rgw.rst index 818648cf5..20ec39a88 100644 --- a/ceph/doc/cephadm/services/rgw.rst +++ b/ceph/doc/cephadm/services/rgw.rst @@ -239,12 +239,14 @@ It is a yaml format file with the following properties: - host2 - host3 spec: - backend_service: rgw.something # adjust to match your existing RGW service - virtual_ip: / # ex: 192.168.20.1/24 - frontend_port: # ex: 8080 - monitor_port: # ex: 1967, used by haproxy for load balancer status - virtual_interface_networks: [ ... ] # optional: list of CIDR networks - ssl_cert: | # optional: SSL certificate and key + backend_service: rgw.something # adjust to match your existing RGW service + virtual_ip: / # ex: 192.168.20.1/24 + frontend_port: # ex: 8080 + monitor_port: # ex: 1967, used by haproxy for load balancer status + virtual_interface_networks: [ ... ] # optional: list of CIDR networks + use_keepalived_multicast: # optional: Default is False. 
+ vrrp_interface_network: / # optional: ex: 192.168.20.0/24 + ssl_cert: | # optional: SSL certificate and key -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- @@ -270,6 +272,7 @@ It is a yaml format file with the following properties: frontend_port: # ex: 8080 monitor_port: # ex: 1967, used by haproxy for load balancer status virtual_interface_networks: [ ... ] # optional: list of CIDR networks + first_virtual_router_id: # optional: default 50 ssl_cert: | # optional: SSL certificate and key -----BEGIN CERTIFICATE----- ... @@ -303,6 +306,21 @@ where the properties of this service specification are: * ``ssl_cert``: SSL certificate, if SSL is to be enabled. This must contain the both the certificate and private key blocks in .pem format. +* ``use_keepalived_multicast`` + Default is False. By default, cephadm will deploy keepalived config to use unicast IPs, + using the IPs of the hosts. The IPs chosen will be the same IPs cephadm uses to connect + to the machines. But if multicast is prefered, we can set ``use_keepalived_multicast`` + to ``True`` and Keepalived will use multicast IP (224.0.0.18) to communicate between instances, + using the same interfaces as where the VIPs are. +* ``vrrp_interface_network`` + By default, cephadm will configure keepalived to use the same interface where the VIPs are + for VRRP communication. If another interface is needed, it can be set via ``vrrp_interface_network`` + with a network to identify which ethernet interface to use. +* ``first_virtual_router_id`` + Default is 50. When deploying more than 1 ingress, this parameter can be used to ensure each + keepalived will have different virtual_router_id. In the case of using ``virtual_ips_list``, + each IP will create its own virtual router. So the first one will have ``first_virtual_router_id``, + second one will have ``first_virtual_router_id`` + 1, etc. Valid values go from 1 to 255. .. _ingress-virtual-ip: diff --git a/ceph/doc/cephadm/troubleshooting.rst b/ceph/doc/cephadm/troubleshooting.rst index 5ec692881..d891ebaf2 100644 --- a/ceph/doc/cephadm/troubleshooting.rst +++ b/ceph/doc/cephadm/troubleshooting.rst @@ -1,60 +1,56 @@ Troubleshooting =============== -You may wish to investigate why a cephadm command failed -or why a certain service no longer runs properly. +This section explains how to investigate why a cephadm command failed or why a +certain service no longer runs properly. -Cephadm deploys daemons within containers. This means that -troubleshooting those containerized daemons will require -a different process than traditional package-install daemons. +Cephadm deploys daemons within containers. Troubleshooting containerized +daemons requires a different process than does troubleshooting traditional +daemons that were installed by means of packages. -Here are some tools and commands to help you troubleshoot -your Ceph environment. +Here are some tools and commands to help you troubleshoot your Ceph +environment. .. _cephadm-pause: Pausing or Disabling cephadm ---------------------------- -If something goes wrong and cephadm is behaving badly, you can -pause most of the Ceph cluster's background activity by running -the following command: +If something goes wrong and cephadm is behaving badly, pause most of the Ceph +cluster's background activity by running the following command: .. prompt:: bash # ceph orch pause -This stops all changes in the Ceph cluster, but cephadm will -still periodically check hosts to refresh its inventory of -daemons and devices. 
You can disable cephadm completely by -running the following commands: +This stops all changes in the Ceph cluster, but cephadm will still periodically +check hosts to refresh its inventory of daemons and devices. Disable cephadm +completely by running the following commands: .. prompt:: bash # ceph orch set backend '' ceph mgr module disable cephadm -These commands disable all of the ``ceph orch ...`` CLI commands. -All previously deployed daemon containers continue to exist and -will start as they did before you ran these commands. +These commands disable all of the ``ceph orch ...`` CLI commands. All +previously deployed daemon containers continue to run and will start just as +they were before you ran these commands. -See :ref:`cephadm-spec-unmanaged` for information on disabling -individual services. +See :ref:`cephadm-spec-unmanaged` for more on disabling individual services. Per-service and Per-daemon Events --------------------------------- -In order to facilitate debugging failed daemons, -cephadm stores events per service and per daemon. -These events often contain information relevant to -troubleshooting your Ceph cluster. +To make it easier to debug failed daemons, cephadm stores events per service +and per daemon. These events often contain information relevant to +the troubleshooting of your Ceph cluster. Listing Service Events ~~~~~~~~~~~~~~~~~~~~~~ -To see the events associated with a certain service, run a -command of the and following form: +To see the events associated with a certain service, run a command of the +following form: .. prompt:: bash # @@ -81,8 +77,8 @@ This will return something in the following form: Listing Daemon Events ~~~~~~~~~~~~~~~~~~~~~ -To see the events associated with a certain daemon, run a -command of the and following form: +To see the events associated with a certain daemon, run a command of the +following form: .. prompt:: bash # @@ -105,32 +101,41 @@ This will return something in the following form: Checking Cephadm Logs --------------------- -To learn how to monitor cephadm logs as they are generated, read :ref:`watching_cephadm_logs`. +To learn how to monitor cephadm logs as they are generated, read +:ref:`watching_cephadm_logs`. -If your Ceph cluster has been configured to log events to files, there will be a -``ceph.cephadm.log`` file on all monitor hosts (see -:ref:`cephadm-logs` for a more complete explanation). +If your Ceph cluster has been configured to log events to files, there will be +a ``ceph.cephadm.log`` file on all monitor hosts. See :ref:`cephadm-logs` for a +more complete explanation. Gathering Log Files ------------------- -Use journalctl to gather the log files of all daemons: +Use ``journalctl`` to gather the log files of all daemons: .. note:: By default cephadm now stores logs in journald. This means that you will no longer find daemon logs in ``/var/log/ceph/``. -To read the log file of one specific daemon, run:: +To read the log file of one specific daemon, run a command of the following +form: - cephadm logs --name +.. prompt:: bash -Note: this only works when run on the same host where the daemon is running. To -get logs of a daemon running on a different host, give the ``--fsid`` option:: + cephadm logs --name - cephadm logs --fsid --name +.. Note:: This works only when run on the same host that is running the daemon. 
+ To get the logs of a daemon that is running on a different host, add the + ``--fsid`` option to the command, as in the following example: -where the ```` corresponds to the cluster ID printed by ``ceph status``. + .. prompt:: bash -To fetch all log files of all daemons on a given host, run:: + cephadm logs --fsid --name + + In this example, ```` corresponds to the cluster ID returned by the + ``ceph status`` command. + +To fetch all log files of all daemons on a given host, run the following +for-loop:: for name in $(cephadm ls | jq -r '.[].name') ; do cephadm logs --fsid --name "$name" > $name; @@ -139,39 +144,41 @@ To fetch all log files of all daemons on a given host, run:: Collecting Systemd Status ------------------------- -To print the state of a systemd unit, run:: +To print the state of a systemd unit, run a command of the following form: - systemctl status "ceph-$(cephadm shell ceph fsid)@.service"; +.. prompt:: bash + systemctl status "ceph-$(cephadm shell ceph fsid)@.service"; -To fetch all state of all daemons of a given host, run:: - fsid="$(cephadm shell ceph fsid)" - for name in $(cephadm ls | jq -r '.[].name') ; do - systemctl status "ceph-$fsid@$name.service" > $name; - done +To fetch the state of all daemons of a given host, run the following shell +script:: + + fsid="$(cephadm shell ceph fsid)" + for name in $(cephadm ls | jq -r '.[].name') ; do + systemctl status "ceph-$fsid@$name.service" > $name; + done List all Downloaded Container Images ------------------------------------ -To list all container images that are downloaded on a host: +To list all container images that are downloaded on a host, run the following +commands: -.. note:: ``Image`` might also be called `ImageID` +.. prompt:: bash # -:: + podman ps -a --format json | jq '.[].Image' "docker.io/library/centos:8" "registry.opensuse.org/opensuse/leap:15.2" - podman ps -a --format json | jq '.[].Image' - "docker.io/library/centos:8" - "registry.opensuse.org/opensuse/leap:15.2" +.. note:: ``Image`` might also be called ``ImageID``. Manually Running Containers --------------------------- Cephadm uses small wrappers when running containers. Refer to -``/var/lib/ceph///unit.run`` for the -container execution command. +``/var/lib/ceph///unit.run`` for the container +execution command. .. _cephadm-ssh-errors: @@ -187,9 +194,10 @@ Error message:: Please make sure that the host is reachable and accepts connections using the cephadm SSH key ... -Things Ceph administrators can do: +If you receive the above error message, try the following things to +troubleshoot the SSH connection between ``cephadm`` and the monitor: -1. Ensure cephadm has an SSH identity key:: +1. Ensure that ``cephadm`` has an SSH identity key:: [root@mon1~]# cephadm shell -- ceph config-key get mgr/cephadm/ssh_identity_key > ~/cephadm_private_key INFO:cephadm:Inferring fsid f8edc08a-7f17-11ea-8707-000c2915dd98 @@ -202,20 +210,21 @@ Things Ceph administrators can do: or:: - [root@mon1 ~]# cat ~/cephadm_private_key | cephadm shell -- ceph cephadm set-ssk-key -i - + [root@mon1 ~]# cat ~/cephadm_private_key | cephadm shell -- ceph cephadm set-ssh-key -i - 2. Ensure that the SSH config is correct:: [root@mon1 ~]# cephadm shell -- ceph cephadm get-ssh-config > config -3. Verify that we can connect to the host:: +3. 
Verify that it is possible to connect to the host:: [root@mon1 ~]# ssh -F config -i ~/cephadm_private_key root@mon1 Verifying that the Public Key is Listed in the authorized_keys file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To verify that the public key is in the authorized_keys file, run the following commands:: +To verify that the public key is in the ``authorized_keys`` file, run the +following commands:: [root@mon1 ~]# cephadm shell -- ceph cephadm get-pub-key > ~/ceph.pub [root@mon1 ~]# grep "`cat ~/ceph.pub`" /root/.ssh/authorized_keys @@ -231,27 +240,33 @@ Or this error:: Must set public_network config option or specify a CIDR network, ceph addrvec, or plain IP -This means that you must run a command of this form:: +This means that you must run a command of this form: + +.. prompt:: bash - ceph config set mon public_network + ceph config set mon public_network -For more detail on operations of this kind, see :ref:`deploy_additional_monitors` +For more detail on operations of this kind, see +:ref:`deploy_additional_monitors`. Accessing the Admin Socket -------------------------- -Each Ceph daemon provides an admin socket that bypasses the -MONs (See :ref:`rados-monitoring-using-admin-socket`). +Each Ceph daemon provides an admin socket that bypasses the MONs (See +:ref:`rados-monitoring-using-admin-socket`). -To access the admin socket, first enter the daemon container on the host:: +#. To access the admin socket, enter the daemon container on the host:: - [root@mon1 ~]# cephadm enter --name - [ceph: root@mon1 /]# ceph --admin-daemon /var/run/ceph/ceph-.asok config show + [root@mon1 ~]# cephadm enter --name + +#. Run a command of the following form to see the admin socket's configuration:: + + [ceph: root@mon1 /]# ceph --admin-daemon /var/run/ceph/ceph-.asok config show Running Various Ceph Tools -------------------------------- -To run Ceph tools like ``ceph-objectstore-tool`` or +To run Ceph tools such as ``ceph-objectstore-tool`` or ``ceph-monstore-tool``, invoke the cephadm CLI with ``cephadm shell --name ``. For example:: @@ -268,100 +283,232 @@ To run Ceph tools like ``ceph-objectstore-tool`` or election_strategy: 1 0: [v2:127.0.0.1:3300/0,v1:127.0.0.1:6789/0] mon.myhostname -The cephadm shell sets up the environment in a way that is suitable -for extended daemon maintenance and running daemons interactively. +The cephadm shell sets up the environment in a way that is suitable for +extended daemon maintenance and for the interactive running of daemons. .. _cephadm-restore-quorum: Restoring the Monitor Quorum ---------------------------- -If the Ceph monitor daemons (mons) cannot form a quorum, cephadm will not be -able to manage the cluster until quorum is restored. +If the Ceph Monitor daemons (mons) cannot form a quorum, ``cephadm`` will not +be able to manage the cluster until quorum is restored. In order to restore the quorum, remove unhealthy monitors form the monmap by following these steps: -1. Stop all mons. For each mon host:: +1. Stop all Monitors. Use ``ssh`` to connect to each Monitor's host, and then + while connected to the Monitor's host use ``cephadm`` to stop the Monitor + daemon: + + .. prompt:: bash - ssh {mon-host} - cephadm unit --name mon.`hostname` stop + ssh {mon-host} + cephadm unit --name {mon.hostname} stop -2. Identify a surviving monitor and log in to that host:: +2. Identify a surviving Monitor and log in to its host: - ssh {mon-host} - cephadm enter --name mon.`hostname` + .. prompt:: bash -3. 
Follow the steps in :ref:`rados-mon-remove-from-unhealthy` + ssh {mon-host} + cephadm enter --name {mon.hostname} + +3. Follow the steps in :ref:`rados-mon-remove-from-unhealthy`. .. _cephadm-manually-deploy-mgr: Manually Deploying a Manager Daemon ----------------------------------- -At least one manager (mgr) daemon is required by cephadm in order to manage the -cluster. If the last mgr in a cluster has been removed, follow these steps in -order to deploy a manager called (for example) -``mgr.hostname.smfvfd`` on a random host of your cluster manually. +At least one Manager (``mgr``) daemon is required by cephadm in order to manage +the cluster. If the last remaining Manager has been removed from the Ceph +cluster, follow these steps in order to deploy a fresh Manager on an arbitrary +host in your cluster. In this example, the freshly-deployed Manager daemon is +called ``mgr.hostname.smfvfd``. + +#. Disable the cephadm scheduler, in order to prevent ``cephadm`` from removing + the new Manager. See :ref:`cephadm-enable-cli`: + + .. prompt:: bash # + + ceph config-key set mgr/cephadm/pause true + +#. Retrieve or create the "auth entry" for the new Manager: -Disable the cephadm scheduler, in order to prevent cephadm from removing the new -manager. See :ref:`cephadm-enable-cli`:: + .. prompt:: bash # - ceph config-key set mgr/cephadm/pause true + ceph auth get-or-create mgr.hostname.smfvfd mon "profile mgr" osd "allow *" mds "allow *" -Then get or create the auth entry for the new manager:: +#. Retrieve the Monitor's configuration: - ceph auth get-or-create mgr.hostname.smfvfd mon "profile mgr" osd "allow *" mds "allow *" + .. prompt:: bash # -Get the ceph.conf:: + ceph config generate-minimal-conf - ceph config generate-minimal-conf +#. Retrieve the container image: -Get the container image:: + .. prompt:: bash # - ceph config get "mgr.hostname.smfvfd" container_image + ceph config get "mgr.hostname.smfvfd" container_image -Create a file ``config-json.json`` which contains the information necessary to deploy -the daemon: +#. Create a file called ``config-json.json``, which contains the information + necessary to deploy the daemon: -.. code-block:: json + .. code-block:: json - { - "config": "# minimal ceph.conf for 8255263a-a97e-4934-822c-00bfe029b28f\n[global]\n\tfsid = 8255263a-a97e-4934-822c-00bfe029b28f\n\tmon_host = [v2:192.168.0.1:40483/0,v1:192.168.0.1:40484/0]\n", - "keyring": "[mgr.hostname.smfvfd]\n\tkey = V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4=\n" - } + { + "config": "# minimal ceph.conf for 8255263a-a97e-4934-822c-00bfe029b28f\n[global]\n\tfsid = 8255263a-a97e-4934-822c-00bfe029b28f\n\tmon_host = [v2:192.168.0.1:40483/0,v1:192.168.0.1:40484/0]\n", + "keyring": "[mgr.hostname.smfvfd]\n\tkey = V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4=\n" + } -Deploy the daemon:: +#. Deploy the Manager daemon: - cephadm --image deploy --fsid --name mgr.hostname.smfvfd --config-json config-json.json + .. prompt:: bash # -Analyzing Core Dumps + cephadm --image deploy --fsid --name mgr.hostname.smfvfd --config-json config-json.json + +Capturing Core Dumps --------------------- -When a Ceph daemon crashes, cephadm supports analyzing core dumps. To enable core dumps, run +A Ceph cluster that uses ``cephadm`` can be configured to capture core dumps. +The initial capture and processing of the coredump is performed by +`systemd-coredump +`_. + + +To enable coredump handling, run the following command .. 
prompt:: bash # - ulimit -c unlimited + ulimit -c unlimited -Core dumps will now be written to ``/var/lib/systemd/coredump``. .. note:: - Core dumps are not namespaced by the kernel, which means - they will be written to ``/var/lib/systemd/coredump`` on - the container host. + Core dumps are not namespaced by the kernel. This means that core dumps are + written to ``/var/lib/systemd/coredump`` on the container host. The ``ulimit + -c unlimited`` setting will persist only until the system is rebooted. + +Wait for the crash to happen again. To simulate the crash of a daemon, run for +example ``killall -3 ceph-mon``. + + +Running the Debugger with cephadm +---------------------------------- + +Running a single debugging session +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Initiate a debugging session by using the ``cephadm shell`` command. +From within the shell container we need to install the debugger and debuginfo +packages. To debug a core file captured by systemd, run the following: + + +#. Start the shell session: + + .. prompt:: bash # + + cephadm shell --mount /var/lib/system/coredump + +#. From within the shell session, run the following commands: + + .. prompt:: bash # + + dnf install ceph-debuginfo gdb zstd + + .. prompt:: bash # + + unzstd /var/lib/systemd/coredump/core.ceph-*.zst + + .. prompt:: bash # + + gdb /usr/bin/ceph-mon /mnt/coredump/core.ceph-*.zst + +#. Run debugger commands at gdb's prompt: + + .. prompt:: bash (gdb) + + bt + + :: + + #0 0x00007fa9117383fc in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 + #1 0x00007fa910d7f8f0 in std::condition_variable::wait(std::unique_lock&) () from /lib64/libstdc++.so.6 + #2 0x00007fa913d3f48f in AsyncMessenger::wait() () from /usr/lib64/ceph/libceph-common.so.2 + #3 0x0000563085ca3d7e in main () + + +Running repeated debugging sessions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When using ``cephadm shell``, as in the example above, any changes made to the +container that is spawned by the shell command are ephemeral. After the shell +session exits, the files that were downloaded and installed cease to be +available. You can simply re-run the same commands every time ``cephadm +shell`` is invoked, but in order to save time and resources one can create a +new container image and use it for repeated debugging sessions. + +In the following example, we create a simple file that will construct the +container image. The command below uses podman but it is expected to work +correctly even if ``podman`` is replaced with ``docker``:: + + cat >Containerfile < to customize the base image + +The above file creates a new local image named ``ceph:debugging``. This image +can be used on the same machine that built it. The image can also be pushed to +a container repository or saved and copied to a node runing other Ceph +containers. Consult the ``podman`` or ``docker`` documentation for more +information about the container workflow. + +After the image has been built, it can be used to initiate repeat debugging +sessions. By using an image in this way, you avoid the trouble of having to +re-install the debug tools and debuginfo packages every time you need to run a +debug session. To debug a core file using this image, in the same way as +previously described, run: + +.. prompt:: bash # + + cephadm --image ceph:debugging shell --mount /var/lib/system/coredump + + +Debugging live processes +~~~~~~~~~~~~~~~~~~~~~~~~ + +The gdb debugger can attach to running processes to debug them. 
This can be +achieved with a containerized process by using the debug image and attaching it +to the same PID namespace in which the process to be debugged resides. + +This requires running a container command with some custom arguments. We can +generate a script that can debug a process in a running container. + +.. prompt:: bash # + + cephadm --image ceph:debugging shell --dry-run > /tmp/debug.sh + +This creates a script that includes the container command that ``cephadm`` +would use to create a shell. Modify the script by removing the ``--init`` +argument and replace it with the argument that joins to the namespace used for +a running running container. For example, assume we want to debug the Manager +and have determnined that the Manager is running in a container named +``ceph-bc615290-685b-11ee-84a6-525400220000-mgr-ceph0-sluwsk``. In this case, +the argument +``--pid=container:ceph-bc615290-685b-11ee-84a6-525400220000-mgr-ceph0-sluwsk`` +should be used. -Now, wait for the crash to happen again. To simulate the crash of a daemon, run e.g. ``killall -3 ceph-mon``. +We can run our debugging container with ``sh /tmp/debug.sh``. Within the shell, +we can run commands such as ``ps`` to get the PID of the Manager process. In +the following example this is ``2``. While running gdb, we can attach to the +running process: -Install debug packages including ``ceph-debuginfo`` by entering the cephadm shelll:: +.. prompt:: bash (gdb) - # cephadm shell --mount /var/lib/systemd/coredump - [ceph: root@host1 /]# dnf install ceph-debuginfo gdb zstd - [ceph: root@host1 /]# unzstd /mnt/coredump/core.ceph-*.zst - [ceph: root@host1 /]# gdb /usr/bin/ceph-mon /mnt/coredump/core.ceph-... - (gdb) bt - #0 0x00007fa9117383fc in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 - #1 0x00007fa910d7f8f0 in std::condition_variable::wait(std::unique_lock&) () from /lib64/libstdc++.so.6 - #2 0x00007fa913d3f48f in AsyncMessenger::wait() () from /usr/lib64/ceph/libceph-common.so.2 - #3 0x0000563085ca3d7e in main () + attach 2 + info threads + bt diff --git a/ceph/doc/cephfs/administration.rst b/ceph/doc/cephfs/administration.rst index e5acc9ab8..cd912b42a 100644 --- a/ceph/doc/cephfs/administration.rst +++ b/ceph/doc/cephfs/administration.rst @@ -15,7 +15,7 @@ creation of multiple file systems use ``ceph fs flag set enable_multiple true``. :: - fs new + ceph fs new This command creates a new file system. The file system name and metadata pool name are self-explanatory. The specified data pool is the default data pool and @@ -25,19 +25,19 @@ to accommodate the new file system. :: - fs ls + ceph fs ls List all file systems by name. :: - fs lsflags + ceph fs lsflags List all the flags set on a file system. :: - fs dump [epoch] + ceph fs dump [epoch] This dumps the FSMap at the given epoch (default: current) which includes all file system settings, MDS daemons and the ranks they hold, and the list of @@ -46,7 +46,7 @@ standby MDS daemons. :: - fs rm [--yes-i-really-mean-it] + ceph fs rm [--yes-i-really-mean-it] Destroy a CephFS file system. This wipes information about the state of the file system from the FSMap. The metadata pool and data pools are untouched and @@ -54,28 +54,28 @@ must be destroyed separately. :: - fs get + ceph fs get Get information about the named file system, including settings and ranks. This -is a subset of the same information from the ``fs dump`` command. +is a subset of the same information from the ``ceph fs dump`` command. 
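For example, assuming a file system named ``cephfs`` (the name here is purely
illustrative), its settings and ranks can be inspected with::

    ceph fs get cephfs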
:: - fs set + ceph fs set Change a setting on a file system. These settings are specific to the named file system and do not affect other file systems. :: - fs add_data_pool + ceph fs add_data_pool Add a data pool to the file system. This pool can be used for file layouts as an alternate location to store file data. :: - fs rm_data_pool + ceph fs rm_data_pool This command removes the specified pool from the list of data pools for the file system. If any files have layouts for the removed data pool, the file @@ -84,7 +84,7 @@ system) cannot be removed. :: - fs rename [--yes-i-really-mean-it] + ceph fs rename [--yes-i-really-mean-it] Rename a Ceph file system. This also changes the application tags on the data pools and metadata pool of the file system to the new file system name. @@ -98,7 +98,7 @@ Settings :: - fs set max_file_size + ceph fs set max_file_size CephFS has a configurable maximum file size, and it's 1TB by default. You may wish to set this limit higher if you expect to store large files @@ -132,13 +132,13 @@ Taking a CephFS cluster down is done by setting the down flag: :: - fs set down true + ceph fs set down true To bring the cluster back online: :: - fs set down false + ceph fs set down false This will also restore the previous value of max_mds. MDS daemons are brought down in a way such that journals are flushed to the metadata pool and all @@ -149,11 +149,11 @@ Taking the cluster down rapidly for deletion or disaster recovery ----------------------------------------------------------------- To allow rapidly deleting a file system (for testing) or to quickly bring the -file system and MDS daemons down, use the ``fs fail`` command: +file system and MDS daemons down, use the ``ceph fs fail`` command: :: - fs fail + ceph fs fail This command sets a file system flag to prevent standbys from activating on the file system (the ``joinable`` flag). @@ -162,7 +162,7 @@ This process can also be done manually by doing the following: :: - fs set joinable false + ceph fs set joinable false Then the operator can fail all of the ranks which causes the MDS daemons to respawn as standbys. The file system will be left in a degraded state. @@ -170,7 +170,7 @@ respawn as standbys. The file system will be left in a degraded state. :: # For all ranks, 0-N: - mds fail : + ceph mds fail : Once all ranks are inactive, the file system may also be deleted or left in this state for other purposes (perhaps disaster recovery). @@ -179,7 +179,7 @@ To bring the cluster back up, simply set the joinable flag: :: - fs set joinable true + ceph fs set joinable true Daemons @@ -198,34 +198,35 @@ Commands to manipulate MDS daemons: :: - mds fail + ceph mds fail Mark an MDS daemon as failed. This is equivalent to what the cluster would do if an MDS daemon had failed to send a message to the mon for ``mds_beacon_grace`` second. If the daemon was active and a suitable -standby is available, using ``mds fail`` will force a failover to the standby. +standby is available, using ``ceph mds fail`` will force a failover to the +standby. -If the MDS daemon was in reality still running, then using ``mds fail`` +If the MDS daemon was in reality still running, then using ``ceph mds fail`` will cause the daemon to restart. If it was active and a standby was available, then the "failed" daemon will return as a standby. :: - tell mds. command ... + ceph tell mds. command ... Send a command to the MDS daemon(s). Use ``mds.*`` to send a command to all daemons. Use ``ceph tell mds.* help`` to learn available commands. 
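For example, to ask the rank 0 MDS of a file system named ``cephfs`` (an
illustrative name) for its client sessions, one might run::

    ceph tell mds.cephfs:0 session ls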
:: - mds metadata + ceph mds metadata Get metadata about the given MDS known to the Monitors. :: - mds repaired + ceph mds repaired Mark the file system rank as repaired. Unlike the name suggests, this command does not change a MDS; it manipulates the file system rank which has been @@ -244,14 +245,14 @@ Commands to manipulate required client features of a file system: :: - fs required_client_features add reply_encoding - fs required_client_features rm reply_encoding + ceph fs required_client_features add reply_encoding + ceph fs required_client_features rm reply_encoding To list all CephFS features :: - fs feature ls + ceph fs feature ls Clients that are missing newly added features will be evicted automatically. @@ -346,7 +347,7 @@ Global settings :: - fs flag set [] + ceph fs flag set [] Sets a global CephFS flag (i.e. not specific to a particular file system). Currently, the only flag setting is 'enable_multiple' which allows having @@ -368,13 +369,13 @@ file system. :: - mds rmfailed + ceph mds rmfailed This removes a rank from the failed set. :: - fs reset + ceph fs reset This command resets the file system state to defaults, except for the name and pools. Non-zero ranks are saved in the stopped set. @@ -382,7 +383,7 @@ pools. Non-zero ranks are saved in the stopped set. :: - fs new --fscid --force + ceph fs new --fscid --force This command creates a file system with a specific **fscid** (file system cluster ID). You may want to do this when an application expects the file system's ID to be diff --git a/ceph/doc/cephfs/cache-configuration.rst b/ceph/doc/cephfs/cache-configuration.rst index 19cf41433..3fc757005 100644 --- a/ceph/doc/cephfs/cache-configuration.rst +++ b/ceph/doc/cephfs/cache-configuration.rst @@ -154,14 +154,8 @@ readdir. The behavior of the decay counter is the same as for cache trimming or caps recall. Each readdir call increments the counter by the number of files in the result. -The ratio of ``mds_max_caps_per_client`` that client must exceed before readdir -maybe throttled by cap acquisition throttle: - .. confval:: mds_session_max_caps_throttle_ratio -The timeout in seconds after which a client request is retried due to cap -acquisition throttling: - .. confval:: mds_cap_acquisition_throttle_retry_request_timeout If the number of caps acquired by the client per session is greater than the diff --git a/ceph/doc/cephfs/fs-volumes.rst b/ceph/doc/cephfs/fs-volumes.rst index cffca11cc..e7fd377bf 100644 --- a/ceph/doc/cephfs/fs-volumes.rst +++ b/ceph/doc/cephfs/fs-volumes.rst @@ -42,28 +42,21 @@ FS Volumes Create a volume by running the following command: - $ ceph fs volume create [] +.. prompt:: bash # + + ceph fs volume create [placement] This creates a CephFS file system and its data and metadata pools. It can also deploy MDS daemons for the filesystem using a ceph-mgr orchestrator module (for example Rook). See :doc:`/mgr/orchestrator`. -```` is the volume name (an arbitrary string). ```` is an -optional string that specifies the hosts that should have an MDS running on -them and, optionally, the total number of MDS daemons that the cluster should -have. For example, the following placement string means "deploy MDS on nodes -``host1`` and ``host2`` (one MDS per host):: - - "host1,host2" +```` is the volume name (an arbitrary string). ``[placement]`` is an +optional string that specifies the :ref:`orchestrator-cli-placement-spec` for +the MDS. See also :ref:`orchestrator-cli-cephfs` for more examples on +placement. 
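As a brief sketch (the volume and host names below are placeholders), a
placement string of the form ``"<count> <host1>,<host2>"`` deploys that many
MDS daemons across the named hosts:

.. prompt:: bash #

   ceph fs volume create vol_a "2 host1,host2"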
-The following placement specification means "deploy two MDS daemons on each of -nodes ``host1`` and ``host2`` (for a total of four MDS daemons in the -cluster)":: - - "4 host1,host2" - -See :ref:`orchestrator-cli-service-spec` for more on placement specification. -Specifying placement via a YAML file is not supported. +.. note:: Specifying placement via a YAML file is not supported through the + volume interface. To remove a volume, run the following command: @@ -72,6 +65,11 @@ To remove a volume, run the following command: This removes a file system and its data and metadata pools. It also tries to remove MDS daemons using the enabled ceph-mgr orchestrator module. +.. note:: After volume deletion, it is recommended to restart `ceph-mgr` + if a new file system is created on the same cluster and subvolume interface + is being used. Please see https://tracker.ceph.com/issues/49605#note-5 + for more details. + List volumes by running the following command: $ ceph fs volume ls diff --git a/ceph/doc/cephfs/mount-using-fuse.rst b/ceph/doc/cephfs/mount-using-fuse.rst index bd098dc91..f3ac054c9 100644 --- a/ceph/doc/cephfs/mount-using-fuse.rst +++ b/ceph/doc/cephfs/mount-using-fuse.rst @@ -28,7 +28,7 @@ To FUSE-mount the Ceph file system, use the ``ceph-fuse`` command:: mkdir /mnt/mycephfs ceph-fuse --id foo /mnt/mycephfs -Option ``-id`` passes the name of the CephX user whose keyring we intend to +Option ``--id`` passes the name of the CephX user whose keyring we intend to use for mounting CephFS. In the above command, it's ``foo``. You can also use ``-n`` instead, although ``--id`` is evidently easier:: diff --git a/ceph/doc/cephfs/multimds.rst b/ceph/doc/cephfs/multimds.rst index 0193ae748..e50a5148e 100644 --- a/ceph/doc/cephfs/multimds.rst +++ b/ceph/doc/cephfs/multimds.rst @@ -226,6 +226,20 @@ For the reverse situation: The ``home/patrick`` directory and its children will be pinned to rank 2 because its export pin overrides the policy on ``home``. +To remove a partitioning policy, remove the respective extended attribute +or set the value to 0. + +.. code::bash + $ setfattr -n ceph.dir.pin.distributed -v 0 home + # or + $ setfattr -x ceph.dir.pin.distributed home + +For export pins, remove the extended attribute or set the extended attribute +value to `-1`. + +.. code::bash + $ setfattr -n ceph.dir.pin -v -1 home + Dynamic subtree partitioning with Balancer on specific ranks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/ceph/doc/cephfs/scrub.rst b/ceph/doc/cephfs/scrub.rst index 114f7580a..5b813f1c4 100644 --- a/ceph/doc/cephfs/scrub.rst +++ b/ceph/doc/cephfs/scrub.rst @@ -143,3 +143,14 @@ The types of damage that can be reported and repaired by File System Scrub are: * BACKTRACE : Inode's backtrace in the data pool is corrupted. +Evaluate strays using recursive scrub +===================================== + +- In order to evaluate strays i.e. purge stray directories in ``~mdsdir`` use the following command:: + + ceph tell mds.:0 scrub start ~mdsdir recursive + +- ``~mdsdir`` is not enqueued by default when scrubbing at the CephFS root. 
In order to perform stray evaluation + at root, run scrub with flags ``scrub_mdsdir`` and ``recursive``:: + + ceph tell mds.:0 scrub start / recursive,scrub_mdsdir diff --git a/ceph/doc/cephfs/snap-schedule.rst b/ceph/doc/cephfs/snap-schedule.rst index 74e2e2fd9..2b8873699 100644 --- a/ceph/doc/cephfs/snap-schedule.rst +++ b/ceph/doc/cephfs/snap-schedule.rst @@ -162,6 +162,13 @@ Examples:: snapshot creation is accounted for in the "created_count" field, which is a cumulative count of the total number of snapshots created so far. +.. note: The maximum number of snapshots to retain per directory is limited by the + config tunable `mds_max_snaps_per_dir`. This tunable defaults to 100. + To ensure a new snapshot can be created, one snapshot less than this will be + retained. So by default, a maximum of 99 snapshots will be retained. + +.. note: The --fs argument is now required if there is more than one file system. + Active and inactive schedules ----------------------------- Snapshot schedules can be added for a path that doesn't exist yet in the diff --git a/ceph/doc/cephfs/troubleshooting.rst b/ceph/doc/cephfs/troubleshooting.rst index b58d2469f..0e511526b 100644 --- a/ceph/doc/cephfs/troubleshooting.rst +++ b/ceph/doc/cephfs/troubleshooting.rst @@ -98,7 +98,7 @@ things to do: .. code:: bash - ceph config set mds mds_heartbeat_reset_grace 3600 + ceph config set mds mds_heartbeat_grace 3600 This has the effect of having the MDS continue to send beacons to the monitors even when its internal "heartbeat" mechanism has not been reset (beat) in one diff --git a/ceph/doc/dev/balancer-design.rst b/ceph/doc/dev/balancer-design.rst new file mode 100644 index 000000000..684d16352 --- /dev/null +++ b/ceph/doc/dev/balancer-design.rst @@ -0,0 +1,58 @@ +============================ +Balancing in Ceph +============================ + +Introduction +============ + +In distributed storage systems like Ceph, it is important to balance write and read requests for optimal performance. Write balancing ensures fast storage +and replication of data in a cluster, while read balancing ensures quick access and retrieval of data in a cluster. Both types of balancing are important +in distributed systems for different reasons. + +Upmap Balancing +========================== + +Importance in a Cluster +----------------------- + +Capacity balancing is a functional requirement. A system like Ceph is as full as its fullest device: When one device is full, the system can not serve write +requests anymore, and Ceph loses its function. To avoid filling up devices, we want to balance capacity across the devices in a fair way. Each device should +get a capacity proportional to its size so all devices have the same fullness level. From a performance perspective, capacity balancing creates fair share +workloads on the OSDs for write requests. + +Capacity balancing is expensive. The operation (changing the mapping of pgs) requires data movement by definition, which takes time. During this time, the +performance of the system is reduced. + +In Ceph, we can balance the write performance if all devices are homogeneous (same size and performance). + +How to Balance Capacity in Ceph +------------------------------- + +See :ref:`upmap` for more information. + +Read Balancing +============== + +Unlike capacity balancing, read balancing is not a strict requirement for Ceph’s functionality. Instead, it is a performance requirement, as it helps the system +“work” better. 
The overall goal is to ensure each device gets its fair share of primary OSDs so read requests are distributed evenly across OSDs in the cluster. +Unbalanced read requests lead to bad performance because of reduced overall cluster bandwidth. + +Read balancing is cheap. Unlike capacity balancing, there is no data movement involved. It is just a metadata operation, where the osdmap is updated to change +which participating OSD in a pg is primary. This operation is fast and has no impact on the cluster performance (except improved performance when the operation +completes – almost immediately). + +In Ceph, we can balance the read performance if all devices are homogeneous (same size and performance). However, in future versions, the read balancer can be improved +to achieve overall cluster performance in heterogeneous systems. + +How to Balance Reads in Ceph +---------------------------- +See :ref:`read_balancer` for more information. + +Also, see the Cephalocon 2023 talk `New Read Balancer in Ceph `_ for a demonstration of the offline version +of the read balancer. + +Plans for the Next Version +-------------------------- + +1. Improve behavior for heterogeneous OSDs in a pool +2. Offer read balancing as an online option to the balancer manager module diff --git a/ceph/doc/dev/cache-pool.rst b/ceph/doc/dev/cache-pool.rst deleted file mode 100644 index 7dc71c828..000000000 --- a/ceph/doc/dev/cache-pool.rst +++ /dev/null @@ -1,200 +0,0 @@ -Cache pool -========== - -Purpose -------- - -Use a pool of fast storage devices (probably SSDs) and use it as a -cache for an existing slower and larger pool. - -Use a replicated pool as a front-end to service most I/O, and destage -cold data to a separate erasure coded pool that does not currently (and -cannot efficiently) handle the workload. - -We should be able to create and add a cache pool to an existing pool -of data, and later remove it, without disrupting service or migrating -data around. - -Use cases ---------- - -Read-write pool, writeback -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We have an existing data pool and put a fast cache pool "in front" of -it. Writes will go to the cache pool and immediately ack. We flush -them back to the data pool based on the defined policy. - -Read-only pool, weak consistency -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We have an existing data pool and add one or more read-only cache -pools. We copy data to the cache pool(s) on read. Writes are -forwarded to the original data pool. Stale data is expired from the -cache pools based on the defined policy. - -This is likely only useful for specific applications with specific -data access patterns. It may be a match for rgw, for example. 
- - -Interface ---------- - -Set up a read/write cache pool foo-hot for pool foo:: - - ceph osd tier add foo foo-hot - ceph osd tier cache-mode foo-hot writeback - -Direct all traffic for foo to foo-hot:: - - ceph osd tier set-overlay foo foo-hot - -Set the target size and enable the tiering agent for foo-hot:: - - ceph osd pool set foo-hot hit_set_type bloom - ceph osd pool set foo-hot hit_set_count 1 - ceph osd pool set foo-hot hit_set_period 3600 # 1 hour - ceph osd pool set foo-hot target_max_bytes 1000000000000 # 1 TB - ceph osd pool set foo-hot min_read_recency_for_promote 1 - ceph osd pool set foo-hot min_write_recency_for_promote 1 - -Drain the cache in preparation for turning it off:: - - ceph osd tier cache-mode foo-hot forward - rados -p foo-hot cache-flush-evict-all - -When cache pool is finally empty, disable it:: - - ceph osd tier remove-overlay foo - ceph osd tier remove foo foo-hot - -Read-only pools with lazy consistency:: - - ceph osd tier add foo foo-east - ceph osd tier cache-mode foo-east readonly - ceph osd tier add foo foo-west - ceph osd tier cache-mode foo-west readonly - - - -Tiering agent -------------- - -The tiering policy is defined as properties on the cache pool itself. - -HitSet metadata -~~~~~~~~~~~~~~~ - -First, the agent requires HitSet information to be tracked on the -cache pool in order to determine which objects in the pool are being -accessed. This is enabled with:: - - ceph osd pool set foo-hot hit_set_type bloom - ceph osd pool set foo-hot hit_set_count 1 - ceph osd pool set foo-hot hit_set_period 3600 # 1 hour - -The supported HitSet types include 'bloom' (a bloom filter, the -default), 'explicit_hash', and 'explicit_object'. The latter two -explicitly enumerate accessed objects and are less memory efficient. -They are there primarily for debugging and to demonstrate pluggability -for the infrastructure. For the bloom filter type, you can additionally -define the false positive probability for the bloom filter (default is 0.05):: - - ceph osd pool set foo-hot hit_set_fpp 0.15 - -The hit_set_count and hit_set_period define how much time each HitSet -should cover, and how many such HitSets to store. Binning accesses -over time allows Ceph to independently determine whether an object was -accessed at least once and whether it was accessed more than once over -some time period ("age" vs "temperature"). - -The ``min_read_recency_for_promote`` defines how many HitSets to check for the -existence of an object when handling a read operation. The checking result is -used to decide whether to promote the object asynchronously. Its value should be -between 0 and ``hit_set_count``. If it's set to 0, the object is always promoted. -If it's set to 1, the current HitSet is checked. And if this object is in the -current HitSet, it's promoted. Otherwise not. For the other values, the exact -number of archive HitSets are checked. The object is promoted if the object is -found in any of the most recent ``min_read_recency_for_promote`` HitSets. - -A similar parameter can be set for the write operation, which is -``min_write_recency_for_promote``. :: - - ceph osd pool set {cachepool} min_read_recency_for_promote 1 - ceph osd pool set {cachepool} min_write_recency_for_promote 1 - -Note that the longer the ``hit_set_period`` and the higher the -``min_read_recency_for_promote``/``min_write_recency_for_promote`` the more RAM -will be consumed by the ceph-osd process. 
In particular, when the agent is active -to flush or evict cache objects, all hit_set_count HitSets are loaded into RAM. - -Cache mode -~~~~~~~~~~ - -The most important policy is the cache mode: - - ceph osd pool set foo-hot cache-mode writeback - -The supported modes are 'none', 'writeback', 'forward', and -'readonly'. Most installations want 'writeback', which will write -into the cache tier and only later flush updates back to the base -tier. Similarly, any object that is read will be promoted into the -cache tier. - -The 'forward' mode is intended for when the cache is being disabled -and needs to be drained. No new objects will be promoted or written -to the cache pool unless they are already present. A background -operation can then do something like:: - - rados -p foo-hot cache-try-flush-evict-all - rados -p foo-hot cache-flush-evict-all - -to force all data to be flushed back to the base tier. - -The 'readonly' mode is intended for read-only workloads that do not -require consistency to be enforced by the storage system. Writes will -be forwarded to the base tier, but objects that are read will get -promoted to the cache. No attempt is made by Ceph to ensure that the -contents of the cache tier(s) are consistent in the presence of object -updates. - -Cache sizing -~~~~~~~~~~~~ - -The agent performs two basic functions: flushing (writing 'dirty' -cache objects back to the base tier) and evicting (removing cold and -clean objects from the cache). - -The thresholds at which Ceph will flush or evict objects is specified -relative to a 'target size' of the pool. For example:: - - ceph osd pool set foo-hot cache_target_dirty_ratio .4 - ceph osd pool set foo-hot cache_target_dirty_high_ratio .6 - ceph osd pool set foo-hot cache_target_full_ratio .8 - -will begin flushing dirty objects when 40% of the pool is dirty and begin -evicting clean objects when we reach 80% of the target size. - -The target size can be specified either in terms of objects or bytes:: - - ceph osd pool set foo-hot target_max_bytes 1000000000000 # 1 TB - ceph osd pool set foo-hot target_max_objects 1000000 # 1 million objects - -Note that if both limits are specified, Ceph will begin flushing or -evicting when either threshold is triggered. - -Other tunables -~~~~~~~~~~~~~~ - -You can specify a minimum object age before a recently updated object is -flushed to the base tier:: - - ceph osd pool set foo-hot cache_min_flush_age 600 # 10 minutes - -You can specify the minimum age of an object before it will be evicted from -the cache tier:: - - ceph osd pool set foo-hot cache_min_evict_age 1800 # 30 minutes - - - diff --git a/ceph/doc/dev/cephfs-mirroring.rst b/ceph/doc/dev/cephfs-mirroring.rst index acd466805..a804a0075 100644 --- a/ceph/doc/dev/cephfs-mirroring.rst +++ b/ceph/doc/dev/cephfs-mirroring.rst @@ -377,7 +377,7 @@ information. To check which mirror daemon a directory has been mapped to use:: "state": "mapped" } -.. note:: `instance_id` is the RAODS instance-id associated with a mirror daemon. +.. note:: `instance_id` is the RADOS instance-id associated with a mirror daemon. Other information such as `state` and `last_shuffled` are interesting when running multiple mirror daemons. diff --git a/ceph/doc/dev/deduplication.rst b/ceph/doc/dev/deduplication.rst index a3f35e3c2..554031840 100644 --- a/ceph/doc/dev/deduplication.rst +++ b/ceph/doc/dev/deduplication.rst @@ -243,7 +243,7 @@ object size in ``POOL`` is zero (evicted) and chunks objects are genereated---th 4. 
Read/write I/Os After step 3, the users don't need to consider anything about I/Os. Deduplicated objects are -completely compatible with existing RAODS operations. +completely compatible with existing RADOS operations. 5. Run scrub to fix reference count diff --git a/ceph/doc/dev/developer_guide/dash-devel.rst b/ceph/doc/dev/developer_guide/dash-devel.rst index fb5ad79bd..1277cecc5 100644 --- a/ceph/doc/dev/developer_guide/dash-devel.rst +++ b/ceph/doc/dev/developer_guide/dash-devel.rst @@ -214,8 +214,8 @@ The build process is based on `Node.js `_ and requires the Prerequisites ~~~~~~~~~~~~~ - * Node 14.15.0 or higher - * NPM 6.14.9 or higher + * Node 18.17.0 or higher + * NPM 9.6.7 or higher nodeenv: During Ceph's build we create a virtualenv with ``node`` and ``npm`` diff --git a/ceph/doc/dev/developer_guide/running-tests-locally.rst b/ceph/doc/dev/developer_guide/running-tests-locally.rst index 8effd97e4..262683bfb 100644 --- a/ceph/doc/dev/developer_guide/running-tests-locally.rst +++ b/ceph/doc/dev/developer_guide/running-tests-locally.rst @@ -55,7 +55,7 @@ using `vstart_runner.py`_. To do that, you'd need `teuthology`_ installed:: $ virtualenv --python=python3 venv $ source venv/bin/activate $ pip install 'setuptools >= 12' - $ pip install git+https://github.com/ceph/teuthology#egg=teuthology[test] + $ pip install teuthology[test]@git+https://github.com/ceph/teuthology $ deactivate The above steps installs teuthology in a virtual environment. Before running diff --git a/ceph/doc/dev/mon-elections.rst b/ceph/doc/dev/mon-elections.rst index 86cfc3803..1f346aece 100644 --- a/ceph/doc/dev/mon-elections.rst +++ b/ceph/doc/dev/mon-elections.rst @@ -1,3 +1,5 @@ +.. _dev_mon_elections: + ================= Monitor Elections ================= diff --git a/ceph/doc/dev/osd_internals/manifest.rst b/ceph/doc/dev/osd_internals/manifest.rst index f998a04f2..7be4350ea 100644 --- a/ceph/doc/dev/osd_internals/manifest.rst +++ b/ceph/doc/dev/osd_internals/manifest.rst @@ -289,40 +289,6 @@ This seems complicated, but it gets us two valuable properties: All clone operations will need to consider adjacent ``chunk_maps`` when adding or removing references. -Cache/Tiering -------------- - -There already exists a cache/tiering mechanism based on whiteouts. -One goal here should ultimately be for this manifest machinery to -provide a complete replacement. - -See ``cache-pool.rst`` - -The manifest machinery already shares some code paths with the -existing cache/tiering code, mainly ``stat_flush``. - -In no particular order, here's in incomplete list of things that need -to be wired up to provide feature parity: - -* Online object access information: The osd already has pool configs - for maintaining bloom filters which provide estimates of access - recency for objects. We probably need to modify this to permit - hitset maintenance for a normal pool -- there are already - ``CEPH_OSD_OP_PG_HITSET*`` interfaces for querying them. -* Tiering agent: The osd already has a background tiering agent which - would need to be modified to instead flush and evict using - manifests. - -* Use exiting existing features regarding the cache flush policy such as - histset, age, ratio. 
- - hitset - - age, ratio, bytes - -* Add tiering-mode to ``manifest-tiering`` - - Writeback - - Read-only - - Data Structures =============== diff --git a/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst b/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst index 88e350c86..31ad18409 100644 --- a/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst +++ b/ceph/doc/dev/osd_internals/mclock_wpq_cmp_study.rst @@ -114,29 +114,6 @@ baseline throughput for each device type was determined: 256 KiB. For HDDs, it was 40MiB. The above throughput was obtained by running 4 KiB random writes at a queue depth of 64 for 300 secs. -Factoring I/O Cost in mClock -============================ - -The services using mClock have a cost associated with them. The cost can be -different for each service type. The mClock scheduler factors in the cost -during calculations for parameters like *reservation*, *weight* and *limit*. -The calculations determine when the next op for the service type can be -dequeued from the operation queue. In general, the higher the cost, the longer -an op remains in the operation queue. - -A cost modeling study was performed to determine the cost per I/O and the cost -per byte for SSD and HDD device types. The following cost specific options are -used under the hood by mClock, - -- :confval:`osd_mclock_cost_per_io_usec` -- :confval:`osd_mclock_cost_per_io_usec_hdd` -- :confval:`osd_mclock_cost_per_io_usec_ssd` -- :confval:`osd_mclock_cost_per_byte_usec` -- :confval:`osd_mclock_cost_per_byte_usec_hdd` -- :confval:`osd_mclock_cost_per_byte_usec_ssd` - -See :doc:`/rados/configuration/mclock-config-ref` for more details. - MClock Profile Allocations ========================== diff --git a/ceph/doc/dev/prim-balancer-design.rst b/ceph/doc/dev/prim-balancer-design.rst deleted file mode 100644 index 633a05157..000000000 --- a/ceph/doc/dev/prim-balancer-design.rst +++ /dev/null @@ -1,53 +0,0 @@ - -This document describes the requirements and high-level design of the primary -balancer for Ceph. - -Introduction -============ - -In a distributed storage system such as Ceph, there are some requirements to keep the system balanced in order to make it perform well: - -#. Balance the capacity - This is a functional requirement, a system like Ceph is "as full as its fullest device". When one device is full the system can not serve write requests anymore. In order to do this we want to balance the capacity across the devices in a fair way - that each device gets capacity proportionally to its size and therefore all the devices have the same fullness level. This is a functional requirement. From performance perspective, capacity balancing creates fair share workloads on the OSDs for *write* requests. - -#. Balance the workload - This is a performance requirement, we want to make sure that all the devices will receive a workload according to their performance. Assuming all the devices in a pool use the same technology and have the same bandwidth (a strong recommendation for a well configured system), and all devices in a pool have the same capacity, this means that for each pool, each device gets its fair share of primary OSDs so that the *read* requests are distributed evenly across the OSDs in the cluster. Managing workload balancing for devices with different capacities is discussed in the future enhancements section. - -Requirements -============ - -- For each pool, each OSD should have its fair share of PGs in which it is primary. 
For replicated pools, this would be the number of PGs mapped to this OSD divided by the replica size. - - This may be improved in future releases. (see below) -- Improve the existing capacity balancer code to improve its maintainability -- Primary balancing is performed without data movement (data is moved only when balancing the capacity) -- Fix the global +/-1 balancing issue that happens since the current balancer works on a single pool at a time (this is a stretch goal for the first version) - - - Problem description: In a perfectly balanced system, for each pool, each OSD has a number of PGs that ideally would have mapped to it to create a perfect capacity balancing. This number is usually not an integer, so some OSDs get a bit more PGs mapped and some a bit less. If you have many pools and you balance on a pool-by-pool basis, it is possible that some OSDs always get the "a bit more" side. When this happens, even to a single OSD, the result is non-balanced system where one OSD is more full than the others. This may happen with the current capacity balancer. - -First release (Quincy) assumptions ----------------------------------- - -- Optional - In the first version the feature will be optional and by default will be disabled -- CLI only - In the first version we will probably give access to the primary balancer only by ``osdmaptool`` CLI and will not enable it in the online balancer; this way, the use of the feature is more controlled for early adopters -- No data movement - -Future possible enhancements ----------------------------- - -- Improve the behavior for non identical OSDs in a pool -- Improve the capacity balancing behavior in extreme cases -- Add workload balancing to the online balancer -- A more futuristic feature can be to improve workload balancing based on real load statistics of the OSDs. - -High Level Design -================= - -- The capacity balancing code will remain in one function ``OSDMap::calc_pg_upmaps`` (the signature might be changed) -- The workload (a.k.a primary) balancer will be implemented in a different function -- The workload balancer will do its best based on the current status of the system - - - When called on a balanced system (capacity-wise) with pools with identical devices, it will create a near optimal workload split among the OSDs - - Calling the workload balancer on an unbalanced system (capacity-wise) may yield non optimal results, and in some cases may give worse performance than before the call - -Helper functionality --------------------- - -- Set a seed for random generation in ``osdmaptool`` (For regression tests) diff --git a/ceph/doc/dev/release-checklists.rst b/ceph/doc/dev/release-checklists.rst index 5d296621c..6299c3783 100644 --- a/ceph/doc/dev/release-checklists.rst +++ b/ceph/doc/dev/release-checklists.rst @@ -131,7 +131,7 @@ First release candidate ======================= - [x] src/ceph_release: change type to `rc` -- [ ] opt-in to all telemetry channels, generate telemetry reports, and verify no sensitive details (like pools names) are collected +- [x] opt-in to all telemetry channels, generate telemetry reports, and verify no sensitive details (like pools names) are collected First stable release diff --git a/ceph/doc/glossary.rst b/ceph/doc/glossary.rst index ed04de697..4d7037684 100644 --- a/ceph/doc/glossary.rst +++ b/ceph/doc/glossary.rst @@ -15,10 +15,12 @@ introduced in the Ceph Kraken release. The Luminous release of Ceph promoted BlueStore to the default OSD back end, supplanting FileStore. 
As of the Reef release, FileStore is no - longer available as a storage backend. + longer available as a storage back end. - BlueStore stores objects directly on Ceph block devices without - a mounted file system. + BlueStore stores objects directly on raw block devices or + partitions, and does not interact with mounted file systems. + BlueStore uses RocksDB's key/value database to map object names + to block locations on disk. Bucket In the context of :term:`RGW`, a bucket is a group of objects. @@ -269,7 +271,7 @@ The Ceph manager software, which collects all the state from the whole cluster in one place. - MON + :ref:`MON` The Ceph monitor software. Node @@ -328,6 +330,19 @@ Pools See :term:`pool`. + :ref:`Primary Affinity ` + The characteristic of an OSD that governs the likelihood that + a given OSD will be selected as the primary OSD (or "lead + OSD") in an acting set. Primary affinity was introduced in + Firefly (v. 0.80). See :ref:`Primary Affinity + `. + + Quorum + Quorum is the state that exists when a majority of the + :ref:`Monitors` in the cluster are ``up``. A + minimum of three :ref:`Monitors` must exist in + the cluster in order for Quorum to be possible. + RADOS **R**\eliable **A**\utonomic **D**\istributed **O**\bject **S**\tore. RADOS is the object store that provides a scalable diff --git a/ceph/doc/index.rst b/ceph/doc/index.rst index d4ccd087a..8edc2cb09 100644 --- a/ceph/doc/index.rst +++ b/ceph/doc/index.rst @@ -4,11 +4,11 @@ Ceph delivers **object, block, and file storage in one unified system**. -.. warning:: +.. warning:: - :ref:`If this is your first time using Ceph, read the "Basic Workflow" - page in the Ceph Developer Guide to learn how to contribute to the - Ceph project. (Click anywhere in this paragraph to read the "Basic + :ref:`If this is your first time using Ceph, read the "Basic Workflow" + page in the Ceph Developer Guide to learn how to contribute to the + Ceph project. (Click anywhere in this paragraph to read the "Basic Workflow" page of the Ceph Developer Guide.) `. .. note:: @@ -110,6 +110,7 @@ about Ceph, see our `Architecture`_ section. radosgw/index mgr/index mgr/dashboard + monitoring/index api/index architecture Developer Guide diff --git a/ceph/doc/install/get-packages.rst b/ceph/doc/install/get-packages.rst index c3c40b015..d6edc177b 100644 --- a/ceph/doc/install/get-packages.rst +++ b/ceph/doc/install/get-packages.rst @@ -25,17 +25,17 @@ There are three ways to get packages: Install packages with cephadm ============================= -#. Download the cephadm script +#. Download cephadm .. prompt:: bash $ :substitutions: - curl --silent --remote-name --location https://github.com/ceph/ceph/raw/|stable-release|/src/cephadm/cephadm + curl --silent --remote-name --location https://download.ceph.com/rpm-|stable-release|/el9/noarch/cephadm chmod +x cephadm #. Configure the Ceph repository based on the release name:: - ./cephadm add-repo --release nautilus + ./cephadm add-repo --release |stable-release| For Octopus (15.2.0) and later releases, you can also specify a specific version:: @@ -47,8 +47,8 @@ Install packages with cephadm ./cephadm add-repo --dev my-branch #. Install the appropriate packages. You can install them using your - package management tool (e.g., APT, Yum) directly, or you can also - use the cephadm wrapper. For example:: + package management tool (e.g., APT, Yum) directly, or you can + use the cephadm wrapper command. 
For example:: ./cephadm install ceph-common diff --git a/ceph/doc/man/8/ceph-monstore-tool.rst b/ceph/doc/man/8/ceph-monstore-tool.rst new file mode 100644 index 000000000..9396df0b6 --- /dev/null +++ b/ceph/doc/man/8/ceph-monstore-tool.rst @@ -0,0 +1,90 @@ +:orphan: + +====================================================== + ceph-monstore-tool -- ceph monstore manipulation tool +====================================================== + +.. program:: ceph-monstore-tool + +Synopsis +======== + +| **ceph-monstore-tool** [args|options] + + +Description +=========== + +:program:`ceph-monstore-tool` is used to manipulate MonitorDBStore's data +(monmap, osdmap, etc.) offline. It is similar to `ceph-kvstore-tool`. + +The default RocksDB debug level is `0`. This can be changed using `--debug`. + +Note: + Ceph-specific options take the format `--option-name=VAL` + DO NOT FORGET THE EQUALS SIGN. ('=') + Command-specific options must be passed after a `--` + for example, `get monmap --debug -- --version 10 --out /tmp/foo` + +Commands +======== + +:program:`ceph-monstore-tool` uses many commands for debugging purposes: + +:command:`store-copy ` + Copy the store to PATH. + +:command:`get monmap [-- options]` + Get monmap (version VER if specified) (default: last committed). + +:command:`get osdmap [-- options]` + Get osdmap (version VER if specified) (default: last committed). + +:command:`get msdmap [-- options]` + Get msdmap (version VER if specified) (default: last committed). + +:command:`get mgr [-- options]` + Get mgrmap (version VER if specified) (default: last committed). + +:command:`get crushmap [-- options]` + Get crushmap (version VER if specified) (default: last committed). + +:command:`get osd_snap [-- options]` + Get osd_snap key (`purged_snap` or `purged_epoch`). + +:command:`dump-keys` + Dump store keys to FILE (default: stdout). + +:command:`dump-paxos [-- options]` + Dump Paxos transactions (-- -- help for more info). + +:command:`dump-trace FILE [-- options]` + Dump contents of trace file FILE (-- --help for more info). + +:command:`replay-trace FILE [-- options]` + Replay trace from FILE (-- --help for more info). + +:command:`random-gen [-- options]` + Add randomly genererated ops to the store (-- --help for more info). + +:command:`rewrite-crush [-- options]` + Add a rewrite commit to the store + +:command:`rebuild` + Rebuild store. + +:command:`rm ` + Remove specified key from the store. + +Availability +============ + +**ceph-monstore-tool** is part of Ceph, a massively scalable, open-source, +distributed storage system. See the Ceph documentation at +https://docs.ceph.com for more information. + + +See also +======== + +:doc:`ceph `\(8) diff --git a/ceph/doc/man/8/osdmaptool.rst b/ceph/doc/man/8/osdmaptool.rst index d107c4e30..4ef5458e0 100644 --- a/ceph/doc/man/8/osdmaptool.rst +++ b/ceph/doc/man/8/osdmaptool.rst @@ -183,6 +183,18 @@ Options write modified osdmap with upmap or crush-adjust changes +.. option:: --read + + calculate pg upmap entries to balance pg primaries + +.. option:: --read-pool + + specify which pool the read balancer should adjust + +.. option:: --vstart + + prefix upmap and read output with './bin/' + Example ======= @@ -315,6 +327,31 @@ To simulate the active balancer in upmap mode:: osd.20 pgs 42 Total time elapsed 0.0167765 secs, 5 rounds +To simulate the active balancer in read mode, first make sure capacity is balanced +by running the balancer in upmap mode. 
Then, balance the reads on a replicated pool with:: + + osdmaptool osdmap --read read.out --read-pool + + ./bin/osdmaptool: osdmap file 'om' + writing upmap command output to: read.out + + ---------- BEFORE ------------ + osd.0 | primary affinity: 1 | number of prims: 3 + osd.1 | primary affinity: 1 | number of prims: 10 + osd.2 | primary affinity: 1 | number of prims: 3 + + read_balance_score of 'cephfs.a.meta': 1.88 + + + ---------- AFTER ------------ + osd.0 | primary affinity: 1 | number of prims: 5 + osd.1 | primary affinity: 1 | number of prims: 5 + osd.2 | primary affinity: 1 | number of prims: 6 + + read_balance_score of 'cephfs.a.meta': 1.13 + + + num changes: 5 Availability ============ diff --git a/ceph/doc/man/8/radosgw-admin.rst b/ceph/doc/man/8/radosgw-admin.rst index 54a66e17a..1a6358330 100644 --- a/ceph/doc/man/8/radosgw-admin.rst +++ b/ceph/doc/man/8/radosgw-admin.rst @@ -15,15 +15,15 @@ Synopsis Description =========== -:program:`radosgw-admin` is a RADOS gateway user administration utility. It -allows creating and modifying users. +:program:`radosgw-admin` is a Ceph Object Gateway user administration utility. It +is used to create and modify users. Commands ======== -:program:`radosgw-admin` utility uses many commands for administration purpose -which are as follows: +:program:`radosgw-admin` utility provides commands for administration purposes +as follows: :command:`user create` Create a new user. @@ -32,8 +32,7 @@ which are as follows: Modify a user. :command:`user info` - Display information of a user, and any potentially available - subusers and keys. + Display information for a user including any subusers and keys. :command:`user rename` Renames a user. @@ -51,7 +50,7 @@ which are as follows: Check user info. :command:`user stats` - Show user stats as accounted by quota subsystem. + Show user stats as accounted by the quota subsystem. :command:`user list` List all users. @@ -78,10 +77,10 @@ which are as follows: Remove access key. :command:`bucket list` - List buckets, or, if bucket specified with --bucket=, - list its objects. If bucket specified adding --allow-unordered - removes ordering requirement, possibly generating results more - quickly in buckets with large number of objects. + List buckets, or, if a bucket is specified with --bucket=, + list its objects. Adding --allow-unordered + removes the ordering requirement, possibly generating results more + quickly for buckets with large number of objects. :command:`bucket limit check` Show bucket sharding stats. @@ -93,8 +92,8 @@ which are as follows: Unlink bucket from specified user. :command:`bucket chown` - Link bucket to specified user and update object ACLs. - Use --marker to resume if command gets interrupted. + Change bucket ownership to the specified user and update object ACLs. + Invoke with --marker to resume if the command is interrupted. :command:`bucket stats` Returns bucket statistics. @@ -109,12 +108,13 @@ which are as follows: Rewrite all objects in the specified bucket. :command:`bucket radoslist` - List the rados objects that contain the data for all objects is - the designated bucket, if --bucket= is specified, or - otherwise all buckets. + List the RADOS objects that contain the data for all objects in + the designated bucket, if --bucket= is specified. + Otherwise, list the RADOS objects that contain data for all + buckets. :command:`bucket reshard` - Reshard a bucket. + Reshard a bucket's index. :command:`bucket sync disable` Disable bucket sync. 
@@ -306,16 +306,16 @@ which are as follows: Run data sync for the specified source zone. :command:`sync error list` - list sync error. + List sync errors. :command:`sync error trim` - trim sync error. + Trim sync errors. :command:`zone rename` Rename a zone. :command:`zone placement list` - List zone's placement targets. + List a zone's placement targets. :command:`zone placement add` Add a zone placement target. @@ -365,7 +365,7 @@ which are as follows: List all bucket lifecycle progress. :command:`lc process` - Manually process lifecycle. If a bucket is specified (e.g., via + Manually process lifecycle transitions. If a bucket is specified (e.g., via --bucket_id or via --bucket and optional --tenant), only that bucket is processed. @@ -385,7 +385,7 @@ which are as follows: List metadata log which is needed for multi-site deployments. :command:`mdlog trim` - Trim metadata log manually instead of relying on RGWs integrated log sync. + Trim metadata log manually instead of relying on the gateway's integrated log sync. Before trimming, compare the listings and make sure the last sync was complete, otherwise it can reinitiate a sync. @@ -397,7 +397,7 @@ which are as follows: :command:`bilog trim` Trim bucket index log (use start-marker, end-marker) manually instead - of relying on RGWs integrated log sync. + of relying on the gateway's integrated log sync. Before trimming, compare the listings and make sure the last sync was complete, otherwise it can reinitiate a sync. @@ -405,7 +405,7 @@ which are as follows: List data log which is needed for multi-site deployments. :command:`datalog trim` - Trim data log manually instead of relying on RGWs integrated log sync. + Trim data log manually instead of relying on the gateway's integrated log sync. Before trimming, compare the listings and make sure the last sync was complete, otherwise it can reinitiate a sync. @@ -413,19 +413,19 @@ which are as follows: Read data log status. :command:`orphans find` - Init and run search for leaked rados objects. + Init and run search for leaked RADOS objects. DEPRECATED. See the "rgw-orphan-list" tool. :command:`orphans finish` - Clean up search for leaked rados objects. + Clean up search for leaked RADOS objects. DEPRECATED. See the "rgw-orphan-list" tool. :command:`orphans list-jobs` - List the current job-ids for the orphans search. + List the current orphans search job IDs. DEPRECATED. See the "rgw-orphan-list" tool. :command:`role create` - create a new AWS role for use with STS. + Create a new role for use with STS (Security Token Service). :command:`role rm` Remove a role. @@ -485,7 +485,7 @@ which are as follows: Show events in a pubsub subscription :command:`subscription ack` - Ack (remove) an events in a pubsub subscription + Acknowledge (remove) events in a pubsub subscription Options @@ -499,7 +499,8 @@ Options .. option:: -m monaddress[:port] - Connect to specified monitor (instead of looking through ceph.conf). + Connect to specified monitor (instead of selecting one + from ceph.conf). .. option:: --tenant= @@ -507,19 +508,19 @@ Options .. option:: --uid=uid - The radosgw user ID. + The user on which to operate. .. option:: --new-uid=uid - ID of the new user. Used with 'user rename' command. + The new ID of the user. Used with 'user rename' command. .. option:: --subuser= - Name of the subuser. + Name of the subuser. .. option:: --access-key= - S3 access key. + S3 access key. .. option:: --email=email @@ -531,28 +532,29 @@ Options .. option:: --gen-access-key - Generate random access key (for S3). 
+ Generate random access key (for S3). + .. option:: --gen-secret - Generate random secret key. + Generate random secret key. .. option:: --key-type= - key type, options are: swift, s3. + Key type, options are: swift, s3. .. option:: --temp-url-key[-2]= - Temporary url key. + Temporary URL key. .. option:: --max-buckets - max number of buckets for a user (0 for no limit, negative value to disable bucket creation). - Default is 1000. + Maximum number of buckets for a user (0 for no limit, negative value to disable bucket creation). + Default is 1000. .. option:: --access= - Set the access permissions for the sub-user. + Set the access permissions for the subuser. Available access permissions are read, write, readwrite and full. .. option:: --display-name= @@ -600,24 +602,24 @@ Options .. option:: --bucket-new-name=[tenant-id/] Optional for `bucket link`; use to rename a bucket. - While tenant-id/ can be specified, this is never - necessary for normal operation. + While the tenant-id can be specified, this is not + necessary in normal operation. .. option:: --shard-id= - Optional for mdlog list, bi list, data sync status. Required for ``mdlog trim``. + Optional for mdlog list, bi list, data sync status. Required for ``mdlog trim``. .. option:: --max-entries= - Optional for listing operations to specify the max entries. + Optional for listing operations to specify the max entries. .. option:: --purge-data - When specified, user removal will also purge all the user data. + When specified, user removal will also purge the user's data. .. option:: --purge-keys - When specified, subuser removal will also purge all the subuser keys. + When specified, subuser removal will also purge the subuser' keys. .. option:: --purge-objects @@ -625,7 +627,7 @@ Options .. option:: --metadata-key= - Key to retrieve metadata from with ``metadata get``. + Key from which to retrieve metadata, used with ``metadata get``. .. option:: --remote= @@ -633,11 +635,11 @@ Options .. option:: --period= - Period id. + Period ID. .. option:: --url= - url for pushing/pulling period or realm. + URL for pushing/pulling period or realm. .. option:: --epoch= @@ -657,7 +659,7 @@ Options .. option:: --master-zone= - Master zone id. + Master zone ID. .. option:: --rgw-realm= @@ -665,11 +667,11 @@ Options .. option:: --realm-id= - The realm id. + The realm ID. .. option:: --realm-new-name= - New name of realm. + New name for the realm. .. option:: --rgw-zonegroup= @@ -677,7 +679,7 @@ Options .. option:: --zonegroup-id= - The zonegroup id. + The zonegroup ID. .. option:: --zonegroup-new-name= @@ -685,11 +687,11 @@ Options .. option:: --rgw-zone= - Zone in which radosgw is running. + Zone in which the gateway is running. .. option:: --zone-id= - The zone id. + The zone ID. .. option:: --zone-new-name= @@ -709,7 +711,7 @@ Options .. option:: --placement-id - Placement id for the zonegroup placement commands. + Placement ID for the zonegroup placement commands. .. option:: --tags= @@ -737,7 +739,7 @@ Options .. option:: --data-extra-pool= - The placement target data extra (non-ec) pool. + The placement target data extra (non-EC) pool. .. option:: --placement-index-type= @@ -765,11 +767,11 @@ Options .. option:: --sync-from=[zone-name][,...] - Set the list of zones to sync from. + Set the list of zones from which to sync. .. option:: --sync-from-rm=[zone-name][,...] - Remove the zones from list of zones to sync from. + Remove zone(s) from list of zones from which to sync. .. 
option:: --bucket-index-max-shards @@ -780,71 +782,71 @@ Options .. option:: --fix - Besides checking bucket index, will also fix it. + Fix the bucket index in addition to checking it. .. option:: --check-objects - bucket check: Rebuilds bucket index according to actual objects state. + Bucket check: Rebuilds the bucket index according to actual object state. .. option:: --format= - Specify output format for certain operations. Supported formats: xml, json. + Specify output format for certain operations. Supported formats: xml, json. .. option:: --sync-stats - Option for 'user stats' command. When specified, it will update user stats with - the current stats reported by user's buckets indexes. + Option for the 'user stats' command. When specified, it will update user stats with + the current stats reported by the user's buckets indexes. .. option:: --show-config - Show configuration. + Show configuration. .. option:: --show-log-entries= - Enable/disable dump of log entries on log show. + Enable/disable dumping of log entries on log show. .. option:: --show-log-sum= - Enable/disable dump of log summation on log show. + Enable/disable dump of log summation on log show. .. option:: --skip-zero-entries - Log show only dumps entries that don't have zero value in one of the numeric - field. + Log show only dumps entries that don't have zero value in one of the numeric + field. .. option:: --infile - Specify a file to read in when setting data. + Specify a file to read when setting data. .. option:: --categories= - Comma separated list of categories, used in usage show. + Comma separated list of categories, used in usage show. .. option:: --caps= - List of caps (e.g., "usage=read, write; user=read"). + List of capabilities (e.g., "usage=read, write; user=read"). .. option:: --compression= - Placement target compression algorithm (lz4|snappy|zlib|zstd) + Placement target compression algorithm (lz4|snappy|zlib|zstd). .. option:: --yes-i-really-mean-it - Required for certain operations. + Required as a guardrail for certain destructive operations. .. option:: --min-rewrite-size - Specify the min object size for bucket rewrite (default 4M). + Specify the minimum object size for bucket rewrite (default 4M). .. option:: --max-rewrite-size - Specify the max object size for bucket rewrite (default ULLONG_MAX). + Specify the maximum object size for bucket rewrite (default ULLONG_MAX). .. option:: --min-rewrite-stripe-size - Specify the min stripe size for object rewrite (default 0). If the value + Specify the minimum stripe size for object rewrite (default 0). If the value is set to 0, then the specified object will always be - rewritten for restriping. + rewritten when restriping. .. option:: --warnings-only @@ -854,7 +856,7 @@ Options .. option:: --bypass-gc When specified with bucket deletion, - triggers object deletions by not involving GC. + triggers object deletion without involving GC. .. option:: --inconsistent-index @@ -863,25 +865,25 @@ Options .. option:: --max-concurrent-ios - Maximum concurrent ios for bucket operations. Affects operations that - scan the bucket index, e.g., listing, deletion, and all scan/search - operations such as finding orphans or checking the bucket index. - Default is 32. + Maximum concurrent bucket operations. Affects operations that + scan the bucket index, e.g., listing, deletion, and all scan/search + operations such as finding orphans or checking the bucket index. + The default is 32. Quota Options ============= .. 
option:: --max-objects - Specify max objects (negative value to disable). + Specify the maximum number of objects (negative value to disable). .. option:: --max-size - Specify max size (in B/K/M/G/T, negative value to disable). + Specify the maximum object size (in B/K/M/G/T, negative value to disable). .. option:: --quota-scope - The scope of quota (bucket, user). + The scope of quota (bucket, user). Orphans Search Options @@ -889,16 +891,16 @@ Orphans Search Options .. option:: --num-shards - Number of shards to use for keeping the temporary scan info + Number of shards to use for temporary scan info .. option:: --orphan-stale-secs - Number of seconds to wait before declaring an object to be an orphan. - Default is 86400 (24 hours). + Number of seconds to wait before declaring an object to be an orphan. + The efault is 86400 (24 hours). .. option:: --job-id - Set the job id (for orphans find) + Set the job id (for orphans find) Orphans list-jobs options diff --git a/ceph/doc/man/8/radosgw.rst b/ceph/doc/man/8/radosgw.rst index a3cb00b82..d31f3de8e 100644 --- a/ceph/doc/man/8/radosgw.rst +++ b/ceph/doc/man/8/radosgw.rst @@ -53,10 +53,6 @@ Options Run in foreground, log to usual location -.. option:: --rgw-socket-path=path - - Specify a unix domain socket path. - .. option:: --rgw-region=region The region where radosgw runs @@ -80,30 +76,24 @@ and ``mod_proxy_fcgi`` have to be present in the server. Unlike ``mod_fastcgi``, or process management may be available in the FastCGI application framework in use. -``Apache`` can be configured in a way that enables ``mod_proxy_fcgi`` to be used -with localhost tcp or through unix domain socket. ``mod_proxy_fcgi`` that doesn't -support unix domain socket such as the ones in Apache 2.2 and earlier versions of -Apache 2.4, needs to be configured for use with localhost tcp. Later versions of -Apache like Apache 2.4.9 or later support unix domain socket and as such they -allow for the configuration with unix domain socket instead of localhost tcp. +``Apache`` must be configured in a way that enables ``mod_proxy_fcgi`` to be +used with localhost tcp. The following steps show the configuration in Ceph's configuration file i.e, ``/etc/ceph/ceph.conf`` and the gateway configuration file i.e, ``/etc/httpd/conf.d/rgw.conf`` (RPM-based distros) or ``/etc/apache2/conf-available/rgw.conf`` (Debian-based distros) with localhost -tcp and through unix domain socket: +tcp: #. For distros with Apache 2.2 and early versions of Apache 2.4 that use - localhost TCP and do not support Unix Domain Socket, append the following - contents to ``/etc/ceph/ceph.conf``:: + localhost TCP, append the following contents to ``/etc/ceph/ceph.conf``:: [client.radosgw.gateway] host = {hostname} keyring = /etc/ceph/ceph.client.radosgw.keyring - rgw socket path = "" - log file = /var/log/ceph/client.radosgw.gateway.log - rgw frontends = fastcgi socket_port=9000 socket_host=0.0.0.0 - rgw print continue = false + log_file = /var/log/ceph/client.radosgw.gateway.log + rgw_frontends = fastcgi socket_port=9000 socket_host=0.0.0.0 + rgw_print_continue = false #. Add the following content in the gateway configuration file: @@ -149,16 +139,6 @@ tcp and through unix domain socket: -#. 
For distros with Apache 2.4.9 or later that support Unix Domain Socket, - append the following configuration to ``/etc/ceph/ceph.conf``:: - - [client.radosgw.gateway] - host = {hostname} - keyring = /etc/ceph/ceph.client.radosgw.keyring - rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock - log file = /var/log/ceph/client.radosgw.gateway.log - rgw print continue = false - #. Add the following content in the gateway configuration file: For CentOS/RHEL add in ``/etc/httpd/conf.d/rgw.conf``:: @@ -182,10 +162,6 @@ tcp and through unix domain socket: - Please note, ``Apache 2.4.7`` does not have Unix Domain Socket support in - it and as such it has to be configured with localhost tcp. The Unix Domain - Socket support is available in ``Apache 2.4.9`` and later versions. - #. Generate a key for radosgw to use for authentication with the cluster. :: ceph-authtool -C -n client.radosgw.gateway --gen-key /etc/ceph/keyring.radosgw.gateway diff --git a/ceph/doc/mgr/nfs.rst b/ceph/doc/mgr/nfs.rst index c25410fcd..7e6637684 100644 --- a/ceph/doc/mgr/nfs.rst +++ b/ceph/doc/mgr/nfs.rst @@ -107,22 +107,29 @@ of the details of NFS redirecting traffic on the virtual IP to the appropriate backend NFS servers, and redeploying NFS servers when they fail. -If a user additionally supplies ``--ingress-mode keepalive-only`` a -partial *ingress* service will be deployed that still provides a virtual -IP, but has nfs directly binding to that virtual IP and leaves out any -sort of load balancing or traffic redirection. This setup will restrict -users to deploying only 1 nfs daemon as multiple cannot bind to the same -port on the virtual IP. - -Instead providing ``--ingress-mode default`` will result in the same setup -as not providing the ``--ingress-mode`` flag. In this setup keepalived will be -deployed to handle forming the virtual IP and haproxy will be deployed -to handle load balancing and traffic redirection. - -Enabling ingress via the ``ceph nfs cluster create`` command deploys a -simple ingress configuration with the most common configuration -options. Ingress can also be added to an existing NFS service (e.g., -one created without the ``--ingress`` flag), and the basic NFS service can +An optional ``--ingress-mode`` parameter can be provided to choose +how the *ingress* service is configured: + +- Setting ``--ingress-mode keepalive-only`` deploys a simplified *ingress* + service that provides a virtual IP with the nfs server directly binding to + that virtual IP and leaves out any sort of load balancing or traffic + redirection. This setup will restrict users to deploying only 1 nfs daemon + as multiple cannot bind to the same port on the virtual IP. +- Setting ``--ingress-mode haproxy-standard`` deploys a full *ingress* service + to provide load balancing and high-availability using HAProxy and keepalived. + Client IP addresses are not visible to the back-end NFS server and IP level + restrictions on NFS exports will not function. +- Setting ``--ingress-mode haproxy-protocol`` deploys a full *ingress* service + to provide load balancing and high-availability using HAProxy and keepalived. + Client IP addresses are visible to the back-end NFS server and IP level + restrictions on NFS exports are usable. This mode requires NFS Ganesha version + 5.0 or later. +- Setting ``--ingress-mode default`` is equivalent to not providing any other + ingress mode by name. When no other ingress mode is specified by name + the default ingress mode used is ``haproxy-standard``. 
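+
+For example, a new NFS cluster with PROXY-protocol ingress might be created
+with a command of the following form (the cluster name ``mynfs``, the placement
+specification, and the virtual IP below are illustrative values only; adjust
+them for your environment):
+
+.. prompt:: bash $
+
+   ceph nfs cluster create mynfs "2 host1,host2" --ingress --virtual-ip 10.0.0.100/24 --ingress-mode haproxy-protocol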
+
+Ingress can be added to an existing NFS service (e.g., one initially created
+without the ``--ingress`` flag), and the basic NFS service can
 also be modified after the fact to include non-default options,
 by modifying the services directly. For more information, see
 :ref:`cephadm-ha-nfs`.
diff --git a/ceph/doc/mgr/prometheus.rst b/ceph/doc/mgr/prometheus.rst
index 698b6a2d5..25a7b0d08 100644
--- a/ceph/doc/mgr/prometheus.rst
+++ b/ceph/doc/mgr/prometheus.rst
@@ -41,6 +41,7 @@ Configuration
 .. confval:: rbd_stats_pools_refresh_interval
 .. confval:: standby_behaviour
 .. confval:: standby_error_status_code
+.. confval:: exclude_perf_counters

 By default the module will accept HTTP requests on port ``9283`` on all IPv4
 and IPv6 addresses on the host. The port and listen address are both
@@ -217,6 +218,15 @@ the module option ``exclude_perf_counters`` to ``false``:

     ceph config set mgr mgr/prometheus/exclude_perf_counters false

+Ceph daemon performance counters metrics
+-----------------------------------------
+
+With the introduction of the ``ceph-exporter`` daemon, the prometheus module will no longer export Ceph daemon
+perf counters as prometheus metrics by default. However, one may re-enable exporting these metrics by setting
+the module option ``exclude_perf_counters`` to ``false``::
+
+    ceph config set mgr mgr/prometheus/exclude_perf_counters false
+
 Statistic names and labels
 ==========================

diff --git a/ceph/doc/monitoring/index.rst b/ceph/doc/monitoring/index.rst
new file mode 100644
index 000000000..2bf2aca90
--- /dev/null
+++ b/ceph/doc/monitoring/index.rst
@@ -0,0 +1,474 @@
+.. _monitoring:
+
+===================
+Monitoring overview
+===================
+
+The aim of this part of the documentation is to explain the Ceph monitoring
+stack and the meaning of the main Ceph metrics.
+
+With a good understanding of the Ceph monitoring stack and metrics, users can
+create customized monitoring tools, like Prometheus queries, Grafana
+dashboards, or scripts.
+
+
+Ceph Monitoring stack
+=====================
+
+Ceph provides a default monitoring stack which is installed by cephadm and
+explained in the :ref:`Monitoring Services ` section of
+the cephadm documentation.
+
+
+Ceph metrics
+============
+
+The main source of Ceph metrics is the performance counters exposed by each
+Ceph daemon. The :doc:`../dev/perf_counters` are native Ceph monitoring data.
+
+Performance counters are transformed into standard Prometheus metrics by the
+Ceph exporter daemon. This daemon runs on every Ceph cluster host and exposes a
+metrics endpoint where all the performance counters exposed by all the Ceph
+daemons running on the host are published in the form of Prometheus metrics.
+
+In addition to the Ceph exporter, there is another agent that exposes Ceph
+metrics: the Prometheus manager module, which exposes metrics related to
+the whole cluster, basically metrics that are not produced by individual Ceph
+daemons.
+
+The main source for obtaining Ceph metrics is the metrics endpoint exposed by
+the cluster's Prometheus server. Ceph can provide you with the Prometheus
+endpoint where you can obtain the complete list of metrics (coming from Ceph
+exporter daemons and the Prometheus manager module) and execute queries.
+
+Use the following command to obtain the Prometheus server endpoint in your
+cluster:
+
+Example:
+
+.. code-block:: bash
+
+   # ceph orch ps --service_name prometheus
+   NAME                         HOST                          PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID
+   prometheus.cephtest-node-00  cephtest-node-00.cephlab.com  *:9095  running (103m)  50s ago    5w   142M     -        2.33.4   514e6a882f6e  efe3cbc2e521
+
+With this information you can connect to
+``http://cephtest-node-00.cephlab.com:9095`` to access the Prometheus server
+interface.
+
+The complete list of metrics (with help text) for your cluster will be available
+at:
+
+``http://cephtest-node-00.cephlab.com:9095/api/v1/targets/metadata``
+
+
+Note that the main tool allowing users to observe and monitor a Ceph cluster is the **Ceph dashboard**. It provides graphics where the most important cluster and service metrics are represented. Most of the examples in this document are extracted from the dashboard graphics or extrapolated from the metrics exposed by the Ceph dashboard.
+
+
+Performance metrics
+===================
+
+Main metrics used to measure Ceph cluster performance:
+
+All metrics have the following labels:
+``ceph_daemon``: identifier of the OSD daemon generating the metric
+``instance``: the IP address of the ceph exporter instance exposing the metric.
+``job``: prometheus scrape job
+
+Example:
+
+.. code-block:: bash
+
+   ceph_osd_op_r{ceph_daemon="osd.0", instance="192.168.122.7:9283", job="ceph"} = 73981
+
+*Cluster I/O (throughput):*
+Use ``ceph_osd_op_r_out_bytes`` and ``ceph_osd_op_w_in_bytes`` to obtain the cluster throughput generated by clients
+
+Example:
+
+.. code-block:: bash
+
+   Writes (B/s):
+   sum(irate(ceph_osd_op_w_in_bytes[1m]))
+
+   Reads (B/s):
+   sum(irate(ceph_osd_op_r_out_bytes[1m]))
+
+
+*Cluster I/O (operations):*
+Use ``ceph_osd_op_r``, ``ceph_osd_op_w`` to obtain the number of operations generated by clients
+
+Example:
+
+.. code-block:: bash
+
+   Writes (ops/s):
+   sum(irate(ceph_osd_op_w[1m]))
+
+   Reads (ops/s):
+   sum(irate(ceph_osd_op_r[1m]))
+
+*Latency:*
+Use ``ceph_osd_op_latency_sum``, which represents the delay before an OSD transfer of data begins following a client instruction for its transfer
+
+Example:
+
+.. code-block:: bash
+
+   sum(irate(ceph_osd_op_latency_sum[1m]))
+
+
+OSD performance
+===============
+
+The cluster performance metrics explained above are based on OSD metrics. By selecting the right label, we can obtain the same performance information for a single OSD:
+
+Example:
+
+.. code-block:: bash
+
+   OSD 0 read latency
+   irate(ceph_osd_op_r_latency_sum{ceph_daemon=~"osd.0"}[1m]) / on (ceph_daemon) irate(ceph_osd_op_r_latency_count[1m])
+
+   OSD 0 write IOPS
+   irate(ceph_osd_op_w{ceph_daemon=~"osd.0"}[1m])
+
+   OSD 0 write throughput (bytes)
+   irate(ceph_osd_op_w_in_bytes{ceph_daemon=~"osd.0"}[1m])
+
+   OSD.0 total raw capacity available
+   ceph_osd_stat_bytes{ceph_daemon="osd.0", instance="cephtest-node-00.cephlab.com:9283", job="ceph"} = 536451481
+
+
+Physical disk performance
+=========================
+
+Combining Prometheus ``node_exporter`` metrics with Ceph metrics, we can obtain
+information about the performance of the physical disks used by OSDs.
+
+Example:
+
+.. 
code-block:: bash + + Read latency of device used by OSD 0: + label_replace(irate(node_disk_read_time_seconds_total[1m]) / irate(node_disk_reads_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + + Write latency of device used by OSD 0 + label_replace(irate(node_disk_write_time_seconds_total[1m]) / irate(node_disk_writes_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + + IOPS (device used by OSD.0) + reads: + label_replace(irate(node_disk_reads_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + + writes: + label_replace(irate(node_disk_writes_completed_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + + Throughput (device used by OSD.0) + reads: + label_replace(irate(node_disk_read_bytes_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + + writes: + label_replace(irate(node_disk_written_bytes_total[1m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + + Physical Device Utilization (%) for OSD.0 in the last 5 minutes + label_replace(irate(node_disk_io_time_seconds_total[5m]), "instance", "$1", "instance", "([^:.]*).*") and on (instance, device) label_replace(label_replace(ceph_disk_occupation_human{ceph_daemon=~"osd.0"}, "device", "$1", "device", "/dev/(.*)"), "instance", "$1", "instance", "([^:.]*).*") + +Pool metrics +============ + +These metrics have the following labels: +``instance``: the ip address of the Ceph exporter daemon producing the metric. +``pool_id``: identifier of the pool +``job``: prometheus scrape job + + +- ``ceph_pool_metadata``: Information about the pool It can be used together + with other metrics to provide more contextual information in queries and + graphs. Apart of the three common labels this metric provide the following + extra labels: + + - ``compression_mode``: compression used in the pool (lz4, snappy, zlib, + zstd, none). Example: compression_mode="none" + + - ``description``: brief description of the pool type (replica:number of + replicas or Erasure code: ec profile). Example: description="replica:3" + - ``name``: name of the pool. Example: name=".mgr" + - ``type``: type of pool (replicated/erasure code). 
Example: type="replicated" + +- ``ceph_pool_bytes_used``: Total raw capacity consumed by user data and associated overheads by pool (metadata + redundancy): + +- ``ceph_pool_stored``: Total of CLIENT data stored in the pool + +- ``ceph_pool_compress_under_bytes``: Data eligible to be compressed in the pool + +- ``ceph_pool_compress_bytes_used``: Data compressed in the pool + +- ``ceph_pool_rd``: CLIENT read operations per pool (reads per second) + +- ``ceph_pool_rd_bytes``: CLIENT read operations in bytes per pool + +- ``ceph_pool_wr``: CLIENT write operations per pool (writes per second) + +- ``ceph_pool_wr_bytes``: CLIENT write operation in bytes per pool + + +**Useful queries**: + +.. code-block:: bash + + Total raw capacity available in the cluster: + sum(ceph_osd_stat_bytes) + + Total raw capacity consumed in the cluster (including metadata + redundancy): + sum(ceph_pool_bytes_used) + + Total of CLIENT data stored in the cluster: + sum(ceph_pool_stored) + + Compression savings: + sum(ceph_pool_compress_under_bytes - ceph_pool_compress_bytes_used) + + CLIENT IOPS for a pool (testrbdpool) + reads: irate(ceph_pool_rd[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"testrbdpool"} + writes: irate(ceph_pool_wr[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"testrbdpool"} + + CLIENT Throughput for a pool + reads: irate(ceph_pool_rd_bytes[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"testrbdpool"} + writes: irate(ceph_pool_wr_bytes[1m]) * on(pool_id) group_left(instance,name) ceph_pool_metadata{name=~"testrbdpool"} + +Object metrics +============== + +These metrics have the following labels: +``instance``: the ip address of the ceph exporter daemon providing the metric +``instance_id``: identifier of the rgw daemon +``job``: prometheus scrape job + +Example: + +.. code-block:: bash + + ceph_rgw_req{instance="192.168.122.7:9283", instance_id="154247", job="ceph"} = 12345 + + +Generic metrics +--------------- +- ``ceph_rgw_metadata``: Provides generic information about the RGW daemon. It + can be used together with other metrics to provide more contextual + information in queries and graphs. Apart from the three common labels, this + metric provides the following extra labels: + + - ``ceph_daemon``: Name of the Ceph daemon. Example: + ceph_daemon="rgw.rgwtest.cephtest-node-00.sxizyq", + - ``ceph_version``: Version of Ceph daemon. Example: ceph_version="ceph + version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", + - ``hostname``: Name of the host where the daemon runs. Example: + hostname:"cephtest-node-00.cephlab.com", + +- ``ceph_rgw_req``: Number total of requests for the daemon (GET+PUT+DELETE) + Useful to detect bottlenecks and optimize load distribution. + +- ``ceph_rgw_qlen``: RGW operations queue length for the daemon. + Useful to detect bottlenecks and optimize load distribution. + +- ``ceph_rgw_failed_req``: Aborted requests. 
+  Useful to detect daemon errors
+
+
+GET operations: related metrics
+-------------------------------
+- ``ceph_rgw_get_initial_lat_count``: Number of GET operations
+
+- ``ceph_rgw_get_initial_lat_sum``: Total latency time for the GET operations
+
+- ``ceph_rgw_get``: Total number of GET requests
+
+- ``ceph_rgw_get_b``: Total bytes transferred in GET operations
+
+
+Put operations: related metrics
+-------------------------------
+- ``ceph_rgw_put_initial_lat_count``: Number of PUT operations
+
+- ``ceph_rgw_put_initial_lat_sum``: Total latency time for the PUT operations
+
+- ``ceph_rgw_put``: Total number of PUT operations
+
+- ``ceph_rgw_put_b``: Total bytes transferred in PUT operations
+
+
+Useful queries
+--------------
+
+.. code-block:: bash
+
+   The average of get latencies:
+   rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata
+
+   The average of put latencies:
+   rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata
+
+   Total requests per second:
+   rate(ceph_rgw_req[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata
+
+   Total number of "other" operations (LIST, DELETE)
+   rate(ceph_rgw_req[30s]) - (rate(ceph_rgw_get[30s]) + rate(ceph_rgw_put[30s]))
+
+   GET latencies
+   rate(ceph_rgw_get_initial_lat_sum[30s]) / rate(ceph_rgw_get_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata
+
+   PUT latencies
+   rate(ceph_rgw_put_initial_lat_sum[30s]) / rate(ceph_rgw_put_initial_lat_count[30s]) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata
+
+   Bandwidth consumed by GET operations
+   sum(rate(ceph_rgw_get_b[30s]))
+
+   Bandwidth consumed by PUT operations
+   sum(rate(ceph_rgw_put_b[30s]))
+
+   Bandwidth consumed by RGW instance (PUTs + GETs)
+   sum by (instance_id) (rate(ceph_rgw_get_b[30s]) + rate(ceph_rgw_put_b[30s])) * on (instance_id) group_left (ceph_daemon) ceph_rgw_metadata
+
+   HTTP errors:
+   rate(ceph_rgw_failed_req[30s])
+
+
+Filesystem Metrics
+==================
+
+These metrics have the following labels:
+``ceph_daemon``: The name of the MDS daemon
+``instance``: the IP address (and port) of the Ceph exporter daemon exposing the metric
+``job``: prometheus scrape job
+
+Example:
+
+.. code-block:: bash
+
+   ceph_mds_request{ceph_daemon="mds.test.cephtest-node-00.hmhsoh", instance="192.168.122.7:9283", job="ceph"} = 1452
+
+
+Main metrics
+------------
+
+- ``ceph_mds_metadata``: Provides general information about the MDS daemon. It
+  can be used together with other metrics to provide more contextual
+  information in queries and graphs. It provides the following extra labels:
+
+  - ``ceph_version``: MDS daemon Ceph version
+  - ``fs_id``: filesystem cluster id
+  - ``hostname``: Host name where the MDS daemon runs
+  - ``public_addr``: Public address where the MDS daemon runs
+  - ``rank``: Rank of the MDS daemon
+
+Example:
+
+.. code-block:: bash
+
+   ceph_mds_metadata{ceph_daemon="mds.test.cephtest-node-00.hmhsoh", ceph_version="ceph version 17.2.6 (d7ff0d10654d2280e08f1ab989c7cdf3064446a5) quincy (stable)", fs_id="-1", hostname="cephtest-node-00.cephlab.com", instance="cephtest-node-00.cephlab.com:9283", job="ceph", public_addr="192.168.122.145:6801/118896446", rank="-1"}
+
+
+- ``ceph_mds_request``: Total number of requests for the MDS daemon
+
+- ``ceph_mds_reply_latency_sum``: Reply latency total
+
+- ``ceph_mds_reply_latency_count``: Reply latency count
+
+- ``ceph_mds_server_handle_client_request``: Number of client requests
+
+- ``ceph_mds_sessions_session_count``: Session count
+
+- ``ceph_mds_sessions_total_load``: Total load
+
+- ``ceph_mds_sessions_sessions_open``: Sessions currently open
+
+- ``ceph_mds_sessions_sessions_stale``: Sessions currently stale
+
+- ``ceph_objecter_op_r``: Number of read operations
+
+- ``ceph_objecter_op_w``: Number of write operations
+
+- ``ceph_mds_root_rbytes``: Total number of bytes managed by the daemon
+
+- ``ceph_mds_root_rfiles``: Total number of files managed by the daemon
+
+
+Useful queries
+--------------
+
+.. code-block:: bash
+
+   Total MDS daemons read workload:
+   sum(rate(ceph_objecter_op_r[1m]))
+
+   Total MDS daemons write workload:
+   sum(rate(ceph_objecter_op_w[1m]))
+
+   MDS daemon read workload: (daemon name is "mdstest")
+   sum(rate(ceph_objecter_op_r{ceph_daemon=~"mdstest"}[1m]))
+
+   MDS daemon write workload: (daemon name is "mdstest")
+   sum(rate(ceph_objecter_op_w{ceph_daemon=~"mdstest"}[1m]))
+
+   The average of reply latencies:
+   rate(ceph_mds_reply_latency_sum[30s]) / rate(ceph_mds_reply_latency_count[30s])
+
+   Total requests per second:
+   rate(ceph_mds_request[30s]) * on (instance) group_right (ceph_daemon) ceph_mds_metadata
+
+
+Block metrics
+=============
+
+By default, per-image RBD metrics are not available, in order to preserve the
+performance of the prometheus manager module.
+
+To produce metrics for RBD images, the manager option
+``mgr/prometheus/rbd_stats_pools`` must be configured properly. For more
+information, see :ref:`prometheus-rbd-io-statistics`.
+
+
+These metrics have the following labels:
+``image``: Name of the image which produces the metric value.
+``instance``: Node where the rbd metric is produced. (It points to the Ceph exporter daemon)
+``job``: Name of the Prometheus scrape job.
+``pool``: Image pool name.
+
+Example:
+
+.. code-block:: bash
+
+   ceph_rbd_read_bytes{image="test2", instance="cephtest-node-00.cephlab.com:9283", job="ceph", pool="testrbdpool"}
+
+
+Main metrics
+------------
+
+- ``ceph_rbd_read_bytes``: RBD image bytes read
+
+- ``ceph_rbd_read_latency_count``: RBD image reads latency count
+
+- ``ceph_rbd_read_latency_sum``: RBD image reads latency total
+
+- ``ceph_rbd_read_ops``: RBD image reads count
+
+- ``ceph_rbd_write_bytes``: RBD image bytes written
+
+- ``ceph_rbd_write_latency_count``: RBD image writes latency count
+
+- ``ceph_rbd_write_latency_sum``: RBD image writes latency total
+
+- ``ceph_rbd_write_ops``: RBD image writes count
+
+
+Useful queries
+--------------
+
+.. 
code-block:: bash + + The average of read latencies: + rate(ceph_rbd_read_latency_sum[30s]) / rate(ceph_rbd_read_latency_count[30s]) * on (instance) group_left (ceph_daemon) ceph_rgw_metadata + + + + diff --git a/ceph/doc/rados/configuration/ceph-conf.rst b/ceph/doc/rados/configuration/ceph-conf.rst index f62a21545..d8d5c9d03 100644 --- a/ceph/doc/rados/configuration/ceph-conf.rst +++ b/ceph/doc/rados/configuration/ceph-conf.rst @@ -4,7 +4,7 @@ Configuring Ceph ================== -When Ceph services start, the initialization process activates a series of +When Ceph services start, the initialization process activates a set of daemons that run in the background. A :term:`Ceph Storage Cluster` runs at least three types of daemons: @@ -12,15 +12,16 @@ least three types of daemons: - :term:`Ceph Manager` (``ceph-mgr``) - :term:`Ceph OSD Daemon` (``ceph-osd``) -Ceph Storage Clusters that support the :term:`Ceph File System` also run at -least one :term:`Ceph Metadata Server` (``ceph-mds``). Clusters that support -:term:`Ceph Object Storage` run Ceph RADOS Gateway daemons (``radosgw``). +Any Ceph Storage Cluster that supports the :term:`Ceph File System` also runs +at least one :term:`Ceph Metadata Server` (``ceph-mds``). Any Cluster that +supports :term:`Ceph Object Storage` runs Ceph RADOS Gateway daemons +(``radosgw``). -Each daemon has a number of configuration options, each of which has a default -value. You may adjust the behavior of the system by changing these -configuration options. Be careful to understand the consequences before -overriding default values, as it is possible to significantly degrade the -performance and stability of your cluster. Note too that default values +Each daemon has a number of configuration options, and each of those options +has a default value. Adjust the behavior of the system by changing these +configuration options. Make sure to understand the consequences before +overriding the default values, as it is possible to significantly degrade the +performance and stability of your cluster. Remember that default values sometimes change between releases. For this reason, it is best to review the version of this documentation that applies to your Ceph release. diff --git a/ceph/doc/rados/configuration/filestore-config-ref.rst b/ceph/doc/rados/configuration/filestore-config-ref.rst index 9d65d00a6..7aefe26b3 100644 --- a/ceph/doc/rados/configuration/filestore-config-ref.rst +++ b/ceph/doc/rados/configuration/filestore-config-ref.rst @@ -4,11 +4,12 @@ .. note:: Since the Luminous release of Ceph, Filestore has not been Ceph's default storage back end. Since the Luminous release of Ceph, BlueStore has - been Ceph's default storage back end. However, Filestore OSDs are still - supported. See :ref:`OSD Back Ends - `. See :ref:`BlueStore Migration - ` for instructions explaining how to - replace an existing Filestore back end with a BlueStore back end. + been Ceph's default storage back end. However, Filestore OSDs are still + supported up to Quincy. Filestore OSDs are not supported in Reef. See + :ref:`OSD Back Ends `. See + :ref:`BlueStore Migration ` for + instructions explaining how to replace an existing Filestore back end with a + BlueStore back end. 
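+
+   To check which back end your existing OSDs use before planning an upgrade,
+   you can inspect the OSD metadata. For example, the following command (shown
+   only as an illustration) summarizes the ``osd_objectstore`` field across
+   all OSDs:
+
+   .. prompt:: bash $
+
+      ceph osd count-metadata osd_objectstore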
``filestore_debug_omap_check`` diff --git a/ceph/doc/rados/configuration/mon-config-ref.rst b/ceph/doc/rados/configuration/mon-config-ref.rst index c19728ada..e0a12d093 100644 --- a/ceph/doc/rados/configuration/mon-config-ref.rst +++ b/ceph/doc/rados/configuration/mon-config-ref.rst @@ -18,27 +18,25 @@ Background Ceph Monitors maintain a "master copy" of the :term:`Cluster Map`. -The maintenance by Ceph Monitors of a :term:`Cluster Map` makes it possible for -a :term:`Ceph Client` to determine the location of all Ceph Monitors, Ceph OSD -Daemons, and Ceph Metadata Servers by connecting to one Ceph Monitor and -retrieving a current cluster map. Before Ceph Clients can read from or write to -Ceph OSD Daemons or Ceph Metadata Servers, they must connect to a Ceph Monitor. -When a Ceph client has a current copy of the cluster map and the CRUSH -algorithm, it can compute the location for any RADOS object within in the -cluster. This ability to compute the locations of objects makes it possible for -Ceph Clients to talk directly to Ceph OSD Daemons. This direct communication -with Ceph OSD Daemons represents an improvment upon traditional storage -architectures in which clients were required to communicate with a central -component, and that improvment contributes to Ceph's high scalability and -performance. See `Scalability and High Availability`_ for additional details. +The :term:`Cluster Map` makes it possible for :term:`Ceph client`\s to +determine the location of all Ceph Monitors, Ceph OSD Daemons, and Ceph +Metadata Servers. Clients do this by connecting to one Ceph Monitor and +retrieving a current cluster map. Ceph clients must connect to a Ceph Monitor +before they can read from or write to Ceph OSD Daemons or Ceph Metadata +Servers. A Ceph client that has a current copy of the cluster map and the CRUSH +algorithm can compute the location of any RADOS object within the cluster. This +makes it possible for Ceph clients to talk directly to Ceph OSD Daemons. Direct +communication between clients and Ceph OSD Daemons improves upon traditional +storage architectures that required clients to communicate with a central +component. See `Scalability and High Availability`_ for more on this subject. The Ceph Monitor's primary function is to maintain a master copy of the cluster map. Monitors also provide authentication and logging services. All changes in the monitor services are written by the Ceph Monitor to a single Paxos -instance, and Paxos writes the changes to a key/value store for strong -consistency. Ceph Monitors are able to query the most recent version of the -cluster map during sync operations, and they use the key/value store's -snapshots and iterators (using leveldb) to perform store-wide synchronization. +instance, and Paxos writes the changes to a key/value store. This provides +strong consistency. Ceph Monitors are able to query the most recent version of +the cluster map during sync operations, and they use the key/value store's +snapshots and iterators (using RocksDB) to perform store-wide synchronization. .. ditaa:: /-------------\ /-------------\ @@ -289,7 +287,6 @@ by setting it in the ``[mon]`` section of the configuration file. .. confval:: mon_data_size_warn .. confval:: mon_data_avail_warn .. confval:: mon_data_avail_crit -.. confval:: mon_warn_on_cache_pools_without_hit_sets .. confval:: mon_warn_on_crush_straw_calc_version_zero .. confval:: mon_warn_on_legacy_crush_tunables .. 
confval:: mon_crush_min_required_version @@ -540,6 +537,8 @@ Trimming requires that the placement groups are ``active+clean``. .. index:: Ceph Monitor; clock +.. _mon-config-ref-clock: + Clock ----- diff --git a/ceph/doc/rados/configuration/msgr2.rst b/ceph/doc/rados/configuration/msgr2.rst index c86707ad6..33fe4e022 100644 --- a/ceph/doc/rados/configuration/msgr2.rst +++ b/ceph/doc/rados/configuration/msgr2.rst @@ -91,7 +91,7 @@ Similarly, two options control whether IPv4 and IPv6 addresses are used: to an IPv6 address .. note:: The ability to bind to multiple ports has paved the way for - dual-stack IPv4 and IPv6 support. That said, dual-stack support is + dual-stack IPv4 and IPv6 support. That said, dual-stack operation is not yet supported as of Quincy v17.2.0. Connection modes diff --git a/ceph/doc/rados/configuration/osd-config-ref.rst b/ceph/doc/rados/configuration/osd-config-ref.rst index 3c3b378e7..060121200 100644 --- a/ceph/doc/rados/configuration/osd-config-ref.rst +++ b/ceph/doc/rados/configuration/osd-config-ref.rst @@ -145,17 +145,20 @@ See `Pool & PG Config Reference`_ for details. Scrubbing ========= -In addition to making multiple copies of objects, Ceph ensures data integrity by -scrubbing placement groups. Ceph scrubbing is analogous to ``fsck`` on the -object storage layer. For each placement group, Ceph generates a catalog of all -objects and compares each primary object and its replicas to ensure that no -objects are missing or mismatched. Light scrubbing (daily) checks the object -size and attributes. Deep scrubbing (weekly) reads the data and uses checksums -to ensure data integrity. - -Scrubbing is important for maintaining data integrity, but it can reduce -performance. You can adjust the following settings to increase or decrease -scrubbing operations. +One way that Ceph ensures data integrity is by "scrubbing" placement groups. +Ceph scrubbing is analogous to ``fsck`` on the object storage layer. Ceph +generates a catalog of all objects in each placement group and compares each +primary object to its replicas, ensuring that no objects are missing or +mismatched. Light scrubbing checks the object size and attributes, and is +usually done daily. Deep scrubbing reads the data and uses checksums to ensure +data integrity, and is usually done weekly. The freqeuncies of both light +scrubbing and deep scrubbing are determined by the cluster's configuration, +which is fully under your control and subject to the settings explained below +in this section. + +Although scrubbing is important for maintaining data integrity, it can reduce +the performance of the Ceph cluster. You can adjust the following settings to +increase or decrease the frequency and depth of scrubbing operations. .. confval:: osd_max_scrubs diff --git a/ceph/doc/rados/configuration/pool-pg-config-ref.rst b/ceph/doc/rados/configuration/pool-pg-config-ref.rst index 6fedd7c78..902c80346 100644 --- a/ceph/doc/rados/configuration/pool-pg-config-ref.rst +++ b/ceph/doc/rados/configuration/pool-pg-config-ref.rst @@ -1,3 +1,5 @@ +.. 
_rados_config_pool_pg_crush_ref: + ====================================== Pool, PG and CRUSH Config Reference ====================================== diff --git a/ceph/doc/rados/operations/add-or-rm-mons.rst b/ceph/doc/rados/operations/add-or-rm-mons.rst index 359fa7676..3688bb798 100644 --- a/ceph/doc/rados/operations/add-or-rm-mons.rst +++ b/ceph/doc/rados/operations/add-or-rm-mons.rst @@ -4,74 +4,70 @@ Adding/Removing Monitors ========================== -When you have a cluster up and running, you may add or remove monitors -from the cluster at runtime. To bootstrap a monitor, see `Manual Deployment`_ -or `Monitor Bootstrap`_. +It is possible to add monitors to a running cluster as long as redundancy is +maintained. To bootstrap a monitor, see `Manual Deployment`_ or `Monitor +Bootstrap`_. .. _adding-monitors: Adding Monitors =============== -Ceph monitors are lightweight processes that are the single source of truth -for the cluster map. You can run a cluster with 1 monitor but we recommend at least 3 -for a production cluster. Ceph monitors use a variation of the -`Paxos`_ algorithm to establish consensus about maps and other critical -information across the cluster. Due to the nature of Paxos, Ceph requires -a majority of monitors to be active to establish a quorum (thus establishing -consensus). - -It is advisable to run an odd number of monitors. An -odd number of monitors is more resilient than an -even number. For instance, with a two monitor deployment, no -failures can be tolerated and still maintain a quorum; with three monitors, -one failure can be tolerated; in a four monitor deployment, one failure can -be tolerated; with five monitors, two failures can be tolerated. This avoids -the dreaded *split brain* phenomenon, and is why an odd number is best. -In short, Ceph needs a majority of -monitors to be active (and able to communicate with each other), but that -majority can be achieved using a single monitor, or 2 out of 2 monitors, -2 out of 3, 3 out of 4, etc. +Ceph monitors serve as the single source of truth for the cluster map. It is +possible to run a cluster with only one monitor, but for a production cluster +it is recommended to have at least three monitors provisioned and in quorum. +Ceph monitors use a variation of the `Paxos`_ algorithm to maintain consensus +about maps and about other critical information across the cluster. Due to the +nature of Paxos, Ceph is able to maintain quorum (and thus establish +consensus) only if a majority of the monitors are ``active``. + +It is best to run an odd number of monitors. This is because a cluster that is +running an odd number of monitors is more resilient than a cluster running an +even number. For example, in a two-monitor deployment, no failures can be +tolerated if quorum is to be maintained; in a three-monitor deployment, one +failure can be tolerated; in a four-monitor deployment, one failure can be +tolerated; and in a five-monitor deployment, two failures can be tolerated. In +general, a cluster running an odd number of monitors is best because it avoids +what is called the *split brain* phenomenon. In short, Ceph is able to operate +only if a majority of monitors are ``active`` and able to communicate with each +other, (for example: there must be a single monitor, two out of two monitors, +two out of three monitors, three out of five monitors, or the like). 
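+
+To see which monitors are currently in quorum on a running cluster, you can
+query the monitors directly (the command below is shown only as an
+illustration; its output format can vary between releases):
+
+.. prompt:: bash $
+
+   ceph quorum_status --format json-pretty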
For small or non-critical deployments of multi-node Ceph clusters, it is -advisable to deploy three monitors, and to increase the number of monitors -to five for larger clusters or to survive a double failure. There is rarely -justification for seven or more. - -Since monitors are lightweight, it is possible to run them on the same -host as OSDs; however, we recommend running them on separate hosts, -because `fsync` issues with the kernel may impair performance. -Dedicated monitor nodes also minimize disruption since monitor and OSD -daemons are not inactive at the same time when a node crashes or is -taken down for maintenance. - -Dedicated -monitor nodes also make for cleaner maintenance by avoiding both OSDs and -a mon going down if a node is rebooted, taken down, or crashes. +recommended to deploy three monitors. For larger clusters or for clusters that +are intended to survive a double failure, it is recommended to deploy five +monitors. Only in rare circumstances is there any justification for deploying +seven or more monitors. + +It is possible to run a monitor on the same host that is running an OSD. +However, this approach has disadvantages: for example: `fsync` issues with the +kernel might weaken performance, monitor and OSD daemons might be inactive at +the same time and cause disruption if the node crashes, is rebooted, or is +taken down for maintenance. Because of these risks, it is instead +recommended to run monitors and managers on dedicated hosts. .. note:: A *majority* of monitors in your cluster must be able to - reach each other in order to establish a quorum. + reach each other in order for quorum to be established. -Deploy your Hardware --------------------- +Deploying your Hardware +----------------------- -If you are adding a new host when adding a new monitor, see `Hardware -Recommendations`_ for details on minimum recommendations for monitor hardware. -To add a monitor host to your cluster, first make sure you have an up-to-date -version of Linux installed (typically Ubuntu 16.04 or RHEL 7). +Some operators choose to add a new monitor host at the same time that they add +a new monitor. For details on the minimum recommendations for monitor hardware, +see `Hardware Recommendations`_. Before adding a monitor host to the cluster, +make sure that there is an up-to-date version of Linux installed. -Add your monitor host to a rack in your cluster, connect it to the network -and ensure that it has network connectivity. +Add the newly installed monitor host to a rack in your cluster, connect the +host to the network, and make sure that the host has network connectivity. .. _Hardware Recommendations: ../../../start/hardware-recommendations -Install the Required Software ------------------------------ +Installing the Required Software +-------------------------------- -For manually deployed clusters, you must install Ceph packages -manually. See `Installing Packages`_ for details. -You should configure SSH to a user with password-less authentication -and root permissions. +In manually deployed clusters, it is necessary to install Ceph packages +manually. For details, see `Installing Packages`_. Configure SSH so that it can +be used by a user that has passwordless authentication and root permissions. .. _Installing Packages: ../../../install/install-storage-cluster @@ -81,67 +77,65 @@ and root permissions. 
Adding a Monitor (Manual) ------------------------- -This procedure creates a ``ceph-mon`` data directory, retrieves the monitor map -and monitor keyring, and adds a ``ceph-mon`` daemon to your cluster. If -this results in only two monitor daemons, you may add more monitors by -repeating this procedure until you have a sufficient number of ``ceph-mon`` -daemons to achieve a quorum. +The procedure in this section creates a ``ceph-mon`` data directory, retrieves +both the monitor map and the monitor keyring, and adds a ``ceph-mon`` daemon to +the cluster. The procedure might result in a Ceph cluster that contains only +two monitor daemons. To add more monitors until there are enough ``ceph-mon`` +daemons to establish quorum, repeat the procedure. -At this point you should define your monitor's id. Traditionally, monitors -have been named with single letters (``a``, ``b``, ``c``, ...), but you are -free to define the id as you see fit. For the purpose of this document, -please take into account that ``{mon-id}`` should be the id you chose, -without the ``mon.`` prefix (i.e., ``{mon-id}`` should be the ``a`` -on ``mon.a``). +This is a good point at which to define the new monitor's ``id``. Monitors have +often been named with single letters (``a``, ``b``, ``c``, etc.), but you are +free to define the ``id`` however you see fit. In this document, ``{mon-id}`` +refers to the ``id`` exclusive of the ``mon.`` prefix: for example, if +``mon.a`` has been chosen as the ``id`` of a monitor, then ``{mon-id}`` is +``a``. ??? -#. Create the default directory on the machine that will host your - new monitor: +#. Create a data directory on the machine that will host the new monitor: .. prompt:: bash $ - ssh {new-mon-host} - sudo mkdir /var/lib/ceph/mon/ceph-{mon-id} + ssh {new-mon-host} + sudo mkdir /var/lib/ceph/mon/ceph-{mon-id} -#. Create a temporary directory ``{tmp}`` to keep the files needed during - this process. This directory should be different from the monitor's default - directory created in the previous step, and can be removed after all the - steps are executed: +#. Create a temporary directory ``{tmp}`` that will contain the files needed + during this procedure. This directory should be different from the data + directory created in the previous step. Because this is a temporary + directory, it can be removed after the procedure is complete: .. prompt:: bash $ - mkdir {tmp} + mkdir {tmp} -#. Retrieve the keyring for your monitors, where ``{tmp}`` is the path to - the retrieved keyring, and ``{key-filename}`` is the name of the file - containing the retrieved monitor key: +#. Retrieve the keyring for your monitors (``{tmp}`` is the path to the + retrieved keyring and ``{key-filename}`` is the name of the file that + contains the retrieved monitor key): .. prompt:: bash $ ceph auth get mon. -o {tmp}/{key-filename} -#. Retrieve the monitor map, where ``{tmp}`` is the path to - the retrieved monitor map, and ``{map-filename}`` is the name of the file - containing the retrieved monitor map: +#. Retrieve the monitor map (``{tmp}`` is the path to the retrieved monitor map + and ``{map-filename}`` is the name of the file that contains the retrieved + monitor map): .. prompt:: bash $ ceph mon getmap -o {tmp}/{map-filename} -#. Prepare the monitor's data directory created in the first step. You must - specify the path to the monitor map so that you can retrieve the - information about a quorum of monitors and their ``fsid``. You must also - specify a path to the monitor keyring: - +#. 
Prepare the monitor's data directory, which was created in the first step. + The following command must specify the path to the monitor map (so that + information about a quorum of monitors and their ``fsid``\s can be + retrieved) and specify the path to the monitor keyring: + .. prompt:: bash $ sudo ceph-mon -i {mon-id} --mkfs --monmap {tmp}/{map-filename} --keyring {tmp}/{key-filename} - -#. Start the new monitor and it will automatically join the cluster. - The daemon needs to know which address to bind to, via either the - ``--public-addr {ip}`` or ``--public-network {network}`` argument. +#. Start the new monitor. It will automatically join the cluster. To provide + information to the daemon about which address to bind to, use either the + ``--public-addr {ip}`` option or the ``--public-network {network}`` option. For example: - + .. prompt:: bash $ ceph-mon -i {mon-id} --public-addr {ip:port} @@ -151,44 +145,47 @@ on ``mon.a``). Removing Monitors ================= -When you remove monitors from a cluster, consider that Ceph monitors use -Paxos to establish consensus about the master cluster map. You must have -a sufficient number of monitors to establish a quorum for consensus about -the cluster map. +When monitors are removed from a cluster, it is important to remember +that Ceph monitors use Paxos to maintain consensus about the cluster +map. Such consensus is possible only if the number of monitors is sufficient +to establish quorum. + .. _Removing a Monitor (Manual): Removing a Monitor (Manual) --------------------------- -This procedure removes a ``ceph-mon`` daemon from your cluster. If this -procedure results in only two monitor daemons, you may add or remove another -monitor until you have a number of ``ceph-mon`` daemons that can achieve a -quorum. +The procedure in this section removes a ``ceph-mon`` daemon from the cluster. +The procedure might result in a Ceph cluster that contains a number of monitors +insufficient to maintain quorum, so plan carefully. When replacing an old +monitor with a new monitor, add the new monitor first, wait for quorum to be +established, and then remove the old monitor. This ensures that quorum is not +lost. + #. Stop the monitor: .. prompt:: bash $ service ceph -a stop mon.{mon-id} - + #. Remove the monitor from the cluster: .. prompt:: bash $ ceph mon remove {mon-id} - -#. Remove the monitor entry from ``ceph.conf``. + +#. Remove the monitor entry from the ``ceph.conf`` file: .. _rados-mon-remove-from-unhealthy: + Removing Monitors from an Unhealthy Cluster ------------------------------------------- -This procedure removes a ``ceph-mon`` daemon from an unhealthy -cluster, for example a cluster where the monitors cannot form a -quorum. - +The procedure in this section removes a ``ceph-mon`` daemon from an unhealthy +cluster (for example, a cluster whose monitors are unable to form a quorum). #. Stop all ``ceph-mon`` daemons on all monitor hosts: @@ -197,63 +194,68 @@ quorum. ssh {mon-host} systemctl stop ceph-mon.target - Repeat for all monitor hosts. + Repeat this step on every monitor host. -#. Identify a surviving monitor and log in to that host: +#. Identify a surviving monitor and log in to the monitor's host: .. prompt:: bash $ ssh {mon-host} -#. Extract a copy of the monmap file: +#. Extract a copy of the ``monmap`` file by running a command of the following + form: .. prompt:: bash $ ceph-mon -i {mon-id} --extract-monmap {map-path} - In most cases, this command will be: + Here is a more concrete example. 
In this example, ``hostname`` is the + ``{mon-id}`` and ``/tmp/monpap`` is the ``{map-path}``: .. prompt:: bash $ ceph-mon -i `hostname` --extract-monmap /tmp/monmap -#. Remove the non-surviving or problematic monitors. For example, if - you have three monitors, ``mon.a``, ``mon.b``, and ``mon.c``, where - only ``mon.a`` will survive, follow the example below: +#. Remove the non-surviving or otherwise problematic monitors: .. prompt:: bash $ monmaptool {map-path} --rm {mon-id} - For example, + For example, suppose that there are three monitors |---| ``mon.a``, ``mon.b``, + and ``mon.c`` |---| and that only ``mon.a`` will survive: .. prompt:: bash $ monmaptool /tmp/monmap --rm b monmaptool /tmp/monmap --rm c - -#. Inject the surviving map with the removed monitors into the - surviving monitor(s). For example, to inject a map into monitor - ``mon.a``, follow the example below: + +#. Inject the surviving map that includes the removed monitors into the + monmap of the surviving monitor(s): .. prompt:: bash $ ceph-mon -i {mon-id} --inject-monmap {map-path} - For example: + Continuing with the above example, inject a map into monitor ``mon.a`` by + running the following command: .. prompt:: bash $ ceph-mon -i a --inject-monmap /tmp/monmap + #. Start only the surviving monitors. -#. Verify the monitors form a quorum (``ceph -s``). +#. Verify that the monitors form a quorum by running the command ``ceph -s``. -#. You may wish to archive the removed monitors' data directory in - ``/var/lib/ceph/mon`` in a safe location, or delete it if you are - confident the remaining monitors are healthy and are sufficiently - redundant. +#. The data directory of the removed monitors is in ``/var/lib/ceph/mon``: + either archive this data directory in a safe location or delete this data + directory. However, do not delete it unless you are confident that the + remaining monitors are healthy and sufficiently redundant. Make sure that + there is enough room for the live DB to expand and compact, and make sure + that there is also room for an archived copy of the DB. The archived copy + can be compressed. .. _Changing a Monitor's IP address: @@ -262,185 +264,195 @@ Changing a Monitor's IP Address .. important:: Existing monitors are not supposed to change their IP addresses. -Monitors are critical components of a Ceph cluster, and they need to maintain a -quorum for the whole system to work properly. To establish a quorum, the -monitors need to discover each other. Ceph has strict requirements for -discovering monitors. +Monitors are critical components of a Ceph cluster. The entire system can work +properly only if the monitors maintain quorum, and quorum can be established +only if the monitors have discovered each other by means of their IP addresses. +Ceph has strict requirements on the discovery of monitors. -Ceph clients and other Ceph daemons use ``ceph.conf`` to discover monitors. -However, monitors discover each other using the monitor map, not ``ceph.conf``. -For example, if you refer to `Adding a Monitor (Manual)`_ you will see that you -need to obtain the current monmap for the cluster when creating a new monitor, -as it is one of the required arguments of ``ceph-mon -i {mon-id} --mkfs``. The -following sections explain the consistency requirements for Ceph monitors, and a -few safe ways to change a monitor's IP address. +Although the ``ceph.conf`` file is used by Ceph clients and other Ceph daemons +to discover monitors, the monitor map is used by monitors to discover each +other. 
This is why it is necessary to obtain the current ``monmap`` at the time +a new monitor is created: as can be seen above in `Adding a Monitor (Manual)`_, +the ``monmap`` is one of the arguments required by the ``ceph-mon -i {mon-id} +--mkfs`` command. The following sections explain the consistency requirements +for Ceph monitors, and also explain a number of safe ways to change a monitor's +IP address. Consistency Requirements ------------------------ -A monitor always refers to the local copy of the monmap when discovering other -monitors in the cluster. Using the monmap instead of ``ceph.conf`` avoids -errors that could break the cluster (e.g., typos in ``ceph.conf`` when -specifying a monitor address or port). Since monitors use monmaps for discovery -and they share monmaps with clients and other Ceph daemons, the monmap provides -monitors with a strict guarantee that their consensus is valid. +When a monitor discovers other monitors in the cluster, it always refers to the +local copy of the monitor map. Using the monitor map instead of using the +``ceph.conf`` file avoids errors that could break the cluster (for example, +typos or other slight errors in ``ceph.conf`` when a monitor address or port is +specified). Because monitors use monitor maps for discovery and because they +share monitor maps with Ceph clients and other Ceph daemons, the monitor map +provides monitors with a strict guarantee that their consensus is valid. Strict consistency also applies to updates to the monmap. As with any other updates on the monitor, changes to the monmap always run through a distributed consensus algorithm called `Paxos`_. The monitors must agree on each update to -the monmap, such as adding or removing a monitor, to ensure that each monitor in -the quorum has the same version of the monmap. Updates to the monmap are +the monmap, such as adding or removing a monitor, to ensure that each monitor +in the quorum has the same version of the monmap. Updates to the monmap are incremental so that monitors have the latest agreed upon version, and a set of -previous versions, allowing a monitor that has an older version of the monmap to -catch up with the current state of the cluster. - -If monitors discovered each other through the Ceph configuration file instead of -through the monmap, it would introduce additional risks because the Ceph -configuration files are not updated and distributed automatically. Monitors -might inadvertently use an older ``ceph.conf`` file, fail to recognize a -monitor, fall out of a quorum, or develop a situation where `Paxos`_ is not able -to determine the current state of the system accurately. Consequently, making -changes to an existing monitor's IP address must be done with great care. - - -Changing a Monitor's IP address (The Right Way) ------------------------------------------------ - -Changing a monitor's IP address in ``ceph.conf`` only is not sufficient to -ensure that other monitors in the cluster will receive the update. To change a -monitor's IP address, you must add a new monitor with the IP address you want -to use (as described in `Adding a Monitor (Manual)`_), ensure that the new -monitor successfully joins the quorum; then, remove the monitor that uses the -old IP address. Then, update the ``ceph.conf`` file to ensure that clients and -other daemons know the IP address of the new monitor. 
- -For example, lets assume there are three monitors in place, such as :: - - [mon.a] - host = host01 - addr = 10.0.0.1:6789 - [mon.b] - host = host02 - addr = 10.0.0.2:6789 - [mon.c] - host = host03 - addr = 10.0.0.3:6789 - -To change ``mon.c`` to ``host04`` with the IP address ``10.0.0.4``, follow the -steps in `Adding a Monitor (Manual)`_ by adding a new monitor ``mon.d``. Ensure -that ``mon.d`` is running before removing ``mon.c``, or it will break the -quorum. Remove ``mon.c`` as described on `Removing a Monitor (Manual)`_. Moving -all three monitors would thus require repeating this process as many times as -needed. - - -Changing a Monitor's IP address (The Messy Way) ------------------------------------------------ - -There may come a time when the monitors must be moved to a different network, a -different part of the datacenter or a different datacenter altogether. While it -is possible to do it, the process becomes a bit more hazardous. - -In such a case, the solution is to generate a new monmap with updated IP -addresses for all the monitors in the cluster, and inject the new map on each -individual monitor. This is not the most user-friendly approach, but we do not -expect this to be something that needs to be done every other week. As it is -clearly stated on the top of this section, monitors are not supposed to change -IP addresses. - -Using the previous monitor configuration as an example, assume you want to move -all the monitors from the ``10.0.0.x`` range to ``10.1.0.x``, and these -networks are unable to communicate. Use the following procedure: - -#. Retrieve the monitor map, where ``{tmp}`` is the path to - the retrieved monitor map, and ``{filename}`` is the name of the file - containing the retrieved monitor map: +previous versions, allowing a monitor that has an older version of the monmap +to catch up with the current state of the cluster. + +There are additional advantages to using the monitor map rather than +``ceph.conf`` when monitors discover each other. Because ``ceph.conf`` is not +automatically updated and distributed, its use would bring certain risks: +monitors might use an outdated ``ceph.conf`` file, might fail to recognize a +specific monitor, might fall out of quorum, and might develop a situation in +which `Paxos`_ is unable to accurately ascertain the current state of the +system. Because of these risks, any changes to an existing monitor's IP address +must be made with great care. + +.. _operations_add_or_rm_mons_changing_mon_ip: + +Changing a Monitor's IP address (Preferred Method) +-------------------------------------------------- + +If a monitor's IP address is changed only in the ``ceph.conf`` file, there is +no guarantee that the other monitors in the cluster will receive the update. +For this reason, the preferred method to change a monitor's IP address is as +follows: add a new monitor with the desired IP address (as described in `Adding +a Monitor (Manual)`_), make sure that the new monitor successfully joins the +quorum, remove the monitor that is using the old IP address, and update the +``ceph.conf`` file to ensure that clients and other daemons are made aware of +the new monitor's IP address. 
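In terms of commands that appear elsewhere in this document, the preferred method is roughly the following sketch (``{new-mon-id}``, ``{old-mon-id}``, and ``{ip}`` are placeholders, and the new monitor is assumed to have already been prepared as described in `Adding a Monitor (Manual)`_):

.. prompt:: bash $

   # start the new monitor so that it joins the cluster
   ceph-mon -i {new-mon-id} --public-addr {ip}:6789
   # confirm that the new monitor is part of the quorum
   ceph quorum_status
   # only then remove the monitor that uses the old IP address
   ceph mon remove {old-mon-id}

Remember to update the ``ceph.conf`` file on clients and other daemons after the old monitor has been removed.
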
 + +For example, suppose that there are three monitors in place:: + + [mon.a] + host = host01 + addr = 10.0.0.1:6789 + [mon.b] + host = host02 + addr = 10.0.0.2:6789 + [mon.c] + host = host03 + addr = 10.0.0.3:6789 + +To change ``mon.c`` so that its name is ``host04`` and its IP address is +``10.0.0.4``: (1) follow the steps in `Adding a Monitor (Manual)`_ to add a new +monitor ``mon.d``, (2) make sure that ``mon.d`` is running before removing +``mon.c`` or else quorum will be broken, and (3) follow the steps in `Removing +a Monitor (Manual)`_ to remove ``mon.c``. To move all three monitors to new IP +addresses, repeat this process. + +Changing a Monitor's IP address (Advanced Method) +------------------------------------------------- + +There are cases in which the method outlined in :ref:`operations_add_or_rm_mons_changing_mon_ip` cannot +be used. For example, it might be necessary to move the cluster's monitors to a +different network, to a different part of the datacenter, or to a different +datacenter altogether. It is still possible to change the monitors' IP +addresses, but a different method must be used. + +For such cases, a new monitor map with updated IP addresses for every monitor +in the cluster must be generated and injected on each monitor. Although this +method is not particularly easy, such a major migration is unlikely to be a +routine task. As stated at the beginning of this section, existing monitors are +not supposed to change their IP addresses. + +Continue with the monitor configuration in the example from +:ref:`operations_add_or_rm_mons_changing_mon_ip`. Suppose that all of the monitors +are to be moved from the ``10.0.0.x`` range to the ``10.1.0.x`` range, and that +these networks are unable to communicate. Carry out the following procedure: + +#. Retrieve the monitor map (``{tmp}`` is the path to the retrieved monitor + map, and ``{filename}`` is the name of the file that contains the retrieved + monitor map): .. prompt:: bash $ ceph mon getmap -o {tmp}/{filename} -#. The following example demonstrates the contents of the monmap: +#. Check the contents of the monitor map: .. prompt:: bash $ monmaptool --print {tmp}/{filename} - :: + :: - monmaptool: monmap file {tmp}/{filename} - epoch 1 - fsid 224e376d-c5fe-4504-96bb-ea6332a19e61 - last_changed 2012-12-17 02:46:41.591248 - created 2012-12-17 02:46:41.591248 - 0: 10.0.0.1:6789/0 mon.a - 1: 10.0.0.2:6789/0 mon.b - 2: 10.0.0.3:6789/0 mon.c + monmaptool: monmap file {tmp}/{filename} + epoch 1 + fsid 224e376d-c5fe-4504-96bb-ea6332a19e61 + last_changed 2012-12-17 02:46:41.591248 + created 2012-12-17 02:46:41.591248 + 0: 10.0.0.1:6789/0 mon.a + 1: 10.0.0.2:6789/0 mon.b + 2: 10.0.0.3:6789/0 mon.c -#. Remove the existing monitors: +#. Remove the existing monitors from the monitor map: .. prompt:: bash $ monmaptool --rm a --rm b --rm c {tmp}/{filename} - :: - monmaptool: monmap file {tmp}/{filename} - monmaptool: removing a - monmaptool: removing b - monmaptool: removing c - monmaptool: writing epoch 1 to {tmp}/{filename} (0 monitors) + monmaptool: monmap file {tmp}/{filename} + monmaptool: removing a + monmaptool: removing b + monmaptool: removing c + monmaptool: writing epoch 1 to {tmp}/{filename} (0 monitors) -#. Add the new monitor locations: +#. Add the new monitor locations to the monitor map: ..
prompt:: bash $ monmaptool --add a 10.1.0.1:6789 --add b 10.1.0.2:6789 --add c 10.1.0.3:6789 {tmp}/{filename} - :: - + monmaptool: monmap file {tmp}/{filename} monmaptool: writing epoch 1 to {tmp}/{filename} (3 monitors) -#. Check new contents: +#. Check the new contents of the monitor map: .. prompt:: bash $ monmaptool --print {tmp}/{filename} - + :: - monmaptool: monmap file {tmp}/{filename} - epoch 1 - fsid 224e376d-c5fe-4504-96bb-ea6332a19e61 - last_changed 2012-12-17 02:46:41.591248 - created 2012-12-17 02:46:41.591248 - 0: 10.1.0.1:6789/0 mon.a - 1: 10.1.0.2:6789/0 mon.b - 2: 10.1.0.3:6789/0 mon.c + monmaptool: monmap file {tmp}/{filename} + epoch 1 + fsid 224e376d-c5fe-4504-96bb-ea6332a19e61 + last_changed 2012-12-17 02:46:41.591248 + created 2012-12-17 02:46:41.591248 + 0: 10.1.0.1:6789/0 mon.a + 1: 10.1.0.2:6789/0 mon.b + 2: 10.1.0.3:6789/0 mon.c -At this point, we assume the monitors (and stores) are installed at the new -location. The next step is to propagate the modified monmap to the new -monitors, and inject the modified monmap into each new monitor. +At this point, we assume that the monitors (and stores) have been installed at +the new location. Next, propagate the modified monitor map to the new monitors, +and inject the modified monitor map into each new monitor. -#. First, make sure to stop all your monitors. Injection must be done while - the daemon is not running. +#. Make sure all of your monitors have been stopped. Never inject into a + monitor while the monitor daemon is running. -#. Inject the monmap: +#. Inject the monitor map: .. prompt:: bash $ ceph-mon -i {mon-id} --inject-monmap {tmp}/{filename} -#. Restart the monitors. +#. Restart all of the monitors. + +Migration to the new location is now complete. The monitors should operate +successfully. -After this step, migration to the new location is complete and -the monitors should operate successfully. .. _Manual Deployment: ../../../install/manual-deployment .. _Monitor Bootstrap: ../../../dev/mon-bootstrap .. _Paxos: https://en.wikipedia.org/wiki/Paxos_(computer_science) + +.. |---| unicode:: U+2014 .. EM DASH + :trim: diff --git a/ceph/doc/rados/operations/balancer.rst b/ceph/doc/rados/operations/balancer.rst index a9a980f7c..aa4eab93c 100644 --- a/ceph/doc/rados/operations/balancer.rst +++ b/ceph/doc/rados/operations/balancer.rst @@ -1,7 +1,7 @@ .. _balancer: -Balancer -======== +Balancer Module +======================= The *balancer* can optimize the allocation of placement groups (PGs) across OSDs in order to achieve a balanced distribution. The balancer can operate diff --git a/ceph/doc/rados/operations/control.rst b/ceph/doc/rados/operations/control.rst index dab7e0e79..033f831cd 100644 --- a/ceph/doc/rados/operations/control.rst +++ b/ceph/doc/rados/operations/control.rst @@ -106,22 +106,27 @@ to be considered ``stuck`` (default: 300). PGs might be stuck in any of the following states: **Inactive** + PGs are unable to process reads or writes because they are waiting for an OSD that has the most up-to-date data to return to an ``up`` state. + **Unclean** + PGs contain objects that have not been replicated the desired number of times. These PGs have not yet completed the process of recovering. + **Stale** + PGs are in an unknown state, because the OSDs that host them have not reported to the monitor cluster for a certain period of time (specified by the ``mon_osd_report_timeout`` configuration setting). 
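For example, to list the PGs that are currently stuck in one or more of these states, a command of the following form can be used (``json-pretty`` is just one of the available output formats):

.. prompt:: bash $

   ceph pg dump_stuck inactive unclean stale --format json-pretty
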
-To delete a ``lost`` RADOS object or revert an object to its prior state -(either by reverting it to its previous version or by deleting it because it -was just created and has no previous version), run the following command: +To delete a ``lost`` object or revert an object to its prior state, either by +reverting it to its previous version or by deleting it because it was just +created and has no previous version, run the following command: .. prompt:: bash $ @@ -168,10 +173,8 @@ To dump the OSD map, run the following command: ceph osd dump [--format {format}] The ``--format`` option accepts the following arguments: ``plain`` (default), -``json``, ``json-pretty``, ``xml``, and ``xml-pretty``. As noted above, JSON -format is the recommended format for consumption by tools, scripting, and other -forms of automation. - +``json``, ``json-pretty``, ``xml``, and ``xml-pretty``. As noted above, JSON is +the recommended format for tools, scripting, and other forms of automation. To dump the OSD map as a tree that lists one OSD per line and displays information about the weights and states of the OSDs, run the following @@ -230,7 +233,7 @@ To mark an OSD as ``lost``, run the following command: .. warning:: This could result in permanent data loss. Use with caution! -To create an OSD in the CRUSH map, run the following command: +To create a new OSD, run the following command: .. prompt:: bash $ @@ -287,47 +290,51 @@ following command: ceph osd in {osd-num} -By using the ``pause`` and ``unpause`` flags in the OSD map, you can pause or -unpause I/O requests. If the flags are set, then no I/O requests will be sent -to any OSD. If the flags are cleared, then pending I/O requests will be resent. -To set or clear these flags, run one of the following commands: +By using the "pause flags" in the OSD map, you can pause or unpause I/O +requests. If the flags are set, then no I/O requests will be sent to any OSD. +When the flags are cleared, then pending I/O requests will be resent. To set or +clear pause flags, run one of the following commands: .. prompt:: bash $ ceph osd pause ceph osd unpause -You can assign an override or ``reweight`` weight value to a specific OSD -if the normal CRUSH distribution seems to be suboptimal. The weight of an -OSD helps determine the extent of its I/O requests and data storage: two -OSDs with the same weight will receive approximately the same number of -I/O requests and store approximately the same amount of data. The ``ceph -osd reweight`` command assigns an override weight to an OSD. The weight -value is in the range 0 to 1, and the command forces CRUSH to relocate a -certain amount (1 - ``weight``) of the data that would otherwise be on -this OSD. The command does not change the weights of the buckets above -the OSD in the CRUSH map. Using the command is merely a corrective -measure: for example, if one of your OSDs is at 90% and the others are at -50%, you could reduce the outlier weight to correct this imbalance. To -assign an override weight to a specific OSD, run the following command: +You can assign an override or ``reweight`` weight value to a specific OSD if +the normal CRUSH distribution seems to be suboptimal. The weight of an OSD +helps determine the extent of its I/O requests and data storage: two OSDs with +the same weight will receive approximately the same number of I/O requests and +store approximately the same amount of data. The ``ceph osd reweight`` command +assigns an override weight to an OSD. 
The weight value is in the range 0 to 1, +and the command forces CRUSH to relocate a certain amount (1 - ``weight``) of +the data that would otherwise be on this OSD. The command does not change the +weights of the buckets above the OSD in the CRUSH map. Using the command is +merely a corrective measure: for example, if one of your OSDs is at 90% and the +others are at 50%, you could reduce the outlier weight to correct this +imbalance. To assign an override weight to a specific OSD, run the following +command: .. prompt:: bash $ ceph osd reweight {osd-num} {weight} +.. note:: Any assigned override reweight value will conflict with the balancer. + This means that if the balancer is in use, all override reweight values + should be ``1.0000`` in order to avoid suboptimal cluster behavior. + A cluster's OSDs can be reweighted in order to maintain balance if some OSDs are being disproportionately utilized. Note that override or ``reweight`` -weights have relative values that default to 1.00000. Their values are not -absolute, and these weights must be distinguished from CRUSH weights (which -reflect the absolute capacity of a bucket, as measured in TiB). To reweight -OSDs by utilization, run the following command: +weights have values relative to one another that default to 1.00000; their +values are not absolute, and these weights must be distinguished from CRUSH +weights (which reflect the absolute capacity of a bucket, as measured in TiB). +To reweight OSDs by utilization, run the following command: .. prompt:: bash $ ceph osd reweight-by-utilization [threshold [max_change [max_osds]]] [--no-increasing] -By default, this command adjusts the override weight of OSDs that have ±20% -of the average utilization, but you can specify a different percentage in the +By default, this command adjusts the override weight of OSDs that have ±20% of +the average utilization, but you can specify a different percentage in the ``threshold`` argument. To limit the increment by which any OSD's reweight is to be changed, use the @@ -355,42 +362,38 @@ Operators of deployments that utilize Nautilus or newer (or later revisions of Luminous and Mimic) and that have no pre-Luminous clients might likely instead want to enable the `balancer`` module for ``ceph-mgr``. -.. note:: The ``balancer`` module does the work for you and achieves a more - uniform result, shuffling less data along the way. When enabling the - ``balancer`` module, you will want to converge any changed override weights - back to 1.00000 so that the balancer can do an optimal job. If your cluster - is very full, reverting these override weights before enabling the balancer - may cause some OSDs to become full. This means that a phased approach may - needed. - -Add/remove an IP address or CIDR range to/from the blocklist. -When adding to the blocklist, -you can specify how long it should be blocklisted in seconds; otherwise, -it will default to 1 hour. A blocklisted address is prevented from -connecting to any OSD. If you blocklist an IP or range containing an OSD, be aware -that OSD will also be prevented from performing operations on its peers where it -acts as a client. (This includes tiering and copy-from functionality.) - -If you want to blocklist a range (in CIDR format), you may do so by -including the ``range`` keyword. - -These commands are mostly only useful for failure testing, as -blocklists are normally maintained automatically and shouldn't need -manual intervention. 
: +The blocklist can be modified by adding or removing an IP address or a CIDR +range. If an address is blocklisted, it will be unable to connect to any OSD. +If an OSD is contained within an IP address or CIDR range that has been +blocklisted, the OSD will be unable to perform operations on its peers when it +acts as a client: such blocked operations include tiering and copy-from +functionality. To add or remove an IP address or CIDR range to the blocklist, +run one of the following commands: .. prompt:: bash $ ceph osd blocklist ["range"] add ADDRESS[:source_port][/netmask_bits] [TIME] ceph osd blocklist ["range"] rm ADDRESS[:source_port][/netmask_bits] -Creates/deletes a snapshot of a pool. : +If you add something to the blocklist with the above ``add`` command, you can +use the ``TIME`` keyword to specify the length of time (in seconds) that it +will remain on the blocklist (default: one hour). To add or remove a CIDR +range, use the ``range`` keyword in the above commands. + +Note that these commands are useful primarily in failure testing. Under normal +conditions, blocklists are maintained automatically and do not need any manual +intervention. + +To create or delete a snapshot of a specific storage pool, run one of the +following commands: .. prompt:: bash $ ceph osd pool mksnap {pool-name} {snap-name} ceph osd pool rmsnap {pool-name} {snap-name} -Creates/deletes/renames a storage pool. : +To create, delete, or rename a specific storage pool, run one of the following +commands: .. prompt:: bash $ @@ -398,20 +401,20 @@ Creates/deletes/renames a storage pool. : ceph osd pool delete {pool-name} [{pool-name} --yes-i-really-really-mean-it] ceph osd pool rename {old-name} {new-name} -Changes a pool setting. : +To change a pool setting, run the following command: .. prompt:: bash $ ceph osd pool set {pool-name} {field} {value} -Valid fields are: +The following are valid fields: - * ``size``: Sets the number of copies of data in the pool. - * ``pg_num``: The placement group number. - * ``pgp_num``: Effective number when calculating pg placement. - * ``crush_rule``: rule number for mapping placement. + * ``size``: The number of copies of data in the pool. + * ``pg_num``: The PG number. + * ``pgp_num``: The effective number of PGs when calculating placement. + * ``crush_rule``: The rule number for mapping placement. -Get the value of a pool setting. : +To retrieve the value of a pool setting, run the following command: .. prompt:: bash $ @@ -419,40 +422,43 @@ Get the value of a pool setting. : Valid fields are: - * ``pg_num``: The placement group number. - * ``pgp_num``: Effective number of placement groups when calculating placement. + * ``pg_num``: The PG number. + * ``pgp_num``: The effective number of PGs when calculating placement. - -Sends a scrub command to OSD ``{osd-num}``. To send the command to all OSDs, use ``*``. : +To send a scrub command to a specific OSD, or to all OSDs (by using ``*``), run +the following command: .. prompt:: bash $ ceph osd scrub {osd-num} -Sends a repair command to OSD.N. To send the command to all OSDs, use ``*``. : +To send a repair command to a specific OSD, or to all OSDs (by using ``*``), +run the following command: .. prompt:: bash $ ceph osd repair N -Runs a simple throughput benchmark against OSD.N, writing ``TOTAL_DATA_BYTES`` -in write requests of ``BYTES_PER_WRITE`` each. By default, the test -writes 1 GB in total in 4-MB increments. 
-The benchmark is non-destructive and will not overwrite existing live -OSD data, but might temporarily affect the performance of clients -concurrently accessing the OSD. : +You can run a simple throughput benchmark test against a specific OSD. This +test writes a total size of ``TOTAL_DATA_BYTES`` (default: 1 GB) incrementally, +in multiple write requests that each have a size of ``BYTES_PER_WRITE`` +(default: 4 MB). The test is not destructive and it will not overwrite existing +live OSD data, but it might temporarily affect the performance of clients that +are concurrently accessing the OSD. To launch this benchmark test, run the +following command: .. prompt:: bash $ ceph tell osd.N bench [TOTAL_DATA_BYTES] [BYTES_PER_WRITE] -To clear an OSD's caches between benchmark runs, use the 'cache drop' command : +To clear the caches of a specific OSD during the interval between one benchmark +run and another, run the following command: .. prompt:: bash $ ceph tell osd.N cache drop -To get the cache statistics of an OSD, use the 'cache status' command : +To retrieve the cache statistics of a specific OSD, run the following command: .. prompt:: bash $ @@ -461,7 +467,8 @@ To get the cache statistics of an OSD, use the 'cache status' command : MDS Subsystem ============= -Change configuration parameters on a running mds. : +To change the configuration parameters of a running metadata server, run the +following command: .. prompt:: bash $ @@ -473,19 +480,20 @@ Example: ceph tell mds.0 config set debug_ms 1 -Enables debug messages. : +To enable debug messages, run the following command: .. prompt:: bash $ ceph mds stat -Displays the status of all metadata servers. : +To display the status of all metadata servers, run the following command: .. prompt:: bash $ ceph mds fail 0 -Marks the active MDS as failed, triggering failover to a standby if present. +To mark the active metadata server as failed (and to trigger failover to a +standby if a standby is present), run the following command: .. todo:: ``ceph mds`` subcommands missing docs: set, dump, getmap, stop, setmap @@ -493,157 +501,165 @@ Marks the active MDS as failed, triggering failover to a standby if present. Mon Subsystem ============= -Show monitor stats: +To display monitor statistics, run the following command: .. prompt:: bash $ ceph mon stat -:: +This command returns output similar to the following: - e2: 3 mons at {a=127.0.0.1:40000/0,b=127.0.0.1:40001/0,c=127.0.0.1:40002/0}, election epoch 6, quorum 0,1,2 a,b,c +:: + e2: 3 mons at {a=127.0.0.1:40000/0,b=127.0.0.1:40001/0,c=127.0.0.1:40002/0}, election epoch 6, quorum 0,1,2 a,b,c -The ``quorum`` list at the end lists monitor nodes that are part of the current quorum. +There is a ``quorum`` list at the end of the output. It lists those monitor +nodes that are part of the current quorum. -This is also available more directly: +To retrieve this information in a more direct way, run the following command: .. prompt:: bash $ ceph quorum_status -f json-pretty - -.. 
code-block:: javascript - - { - "election_epoch": 6, - "quorum": [ - 0, - 1, - 2 - ], - "quorum_names": [ - "a", - "b", - "c" - ], - "quorum_leader_name": "a", - "monmap": { - "epoch": 2, - "fsid": "ba807e74-b64f-4b72-b43f-597dfe60ddbc", - "modified": "2016-12-26 14:42:09.288066", - "created": "2016-12-26 14:42:03.573585", - "features": { - "persistent": [ - "kraken" - ], - "optional": [] - }, - "mons": [ - { - "rank": 0, - "name": "a", - "addr": "127.0.0.1:40000\/0", - "public_addr": "127.0.0.1:40000\/0" - }, - { - "rank": 1, - "name": "b", - "addr": "127.0.0.1:40001\/0", - "public_addr": "127.0.0.1:40001\/0" - }, - { - "rank": 2, - "name": "c", - "addr": "127.0.0.1:40002\/0", - "public_addr": "127.0.0.1:40002\/0" - } - ] - } - } - + +This command returns output similar to the following: + +.. code-block:: javascript + + { + "election_epoch": 6, + "quorum": [ + 0, + 1, + 2 + ], + "quorum_names": [ + "a", + "b", + "c" + ], + "quorum_leader_name": "a", + "monmap": { + "epoch": 2, + "fsid": "ba807e74-b64f-4b72-b43f-597dfe60ddbc", + "modified": "2016-12-26 14:42:09.288066", + "created": "2016-12-26 14:42:03.573585", + "features": { + "persistent": [ + "kraken" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "a", + "addr": "127.0.0.1:40000\/0", + "public_addr": "127.0.0.1:40000\/0" + }, + { + "rank": 1, + "name": "b", + "addr": "127.0.0.1:40001\/0", + "public_addr": "127.0.0.1:40001\/0" + }, + { + "rank": 2, + "name": "c", + "addr": "127.0.0.1:40002\/0", + "public_addr": "127.0.0.1:40002\/0" + } + ] + } + } + The above will block until a quorum is reached. -For a status of just a single monitor: +To see the status of a specific monitor, run the following command: .. prompt:: bash $ ceph tell mon.[name] mon_status - -where the value of ``[name]`` can be taken from ``ceph quorum_status``. Sample -output:: - - { - "name": "b", - "rank": 1, - "state": "peon", - "election_epoch": 6, - "quorum": [ - 0, - 1, - 2 - ], - "features": { - "required_con": "9025616074522624", - "required_mon": [ - "kraken" - ], - "quorum_con": "1152921504336314367", - "quorum_mon": [ - "kraken" - ] - }, - "outside_quorum": [], - "extra_probe_peers": [], - "sync_provider": [], - "monmap": { - "epoch": 2, - "fsid": "ba807e74-b64f-4b72-b43f-597dfe60ddbc", - "modified": "2016-12-26 14:42:09.288066", - "created": "2016-12-26 14:42:03.573585", - "features": { - "persistent": [ - "kraken" - ], - "optional": [] - }, - "mons": [ - { - "rank": 0, - "name": "a", - "addr": "127.0.0.1:40000\/0", - "public_addr": "127.0.0.1:40000\/0" - }, - { - "rank": 1, - "name": "b", - "addr": "127.0.0.1:40001\/0", - "public_addr": "127.0.0.1:40001\/0" - }, - { - "rank": 2, - "name": "c", - "addr": "127.0.0.1:40002\/0", - "public_addr": "127.0.0.1:40002\/0" - } - ] - } - } - -A dump of the monitor state: + +Here the value of ``[name]`` can be found by consulting the output of the +``ceph quorum_status`` command. 
This command returns output similar to the +following: + +:: + + { + "name": "b", + "rank": 1, + "state": "peon", + "election_epoch": 6, + "quorum": [ + 0, + 1, + 2 + ], + "features": { + "required_con": "9025616074522624", + "required_mon": [ + "kraken" + ], + "quorum_con": "1152921504336314367", + "quorum_mon": [ + "kraken" + ] + }, + "outside_quorum": [], + "extra_probe_peers": [], + "sync_provider": [], + "monmap": { + "epoch": 2, + "fsid": "ba807e74-b64f-4b72-b43f-597dfe60ddbc", + "modified": "2016-12-26 14:42:09.288066", + "created": "2016-12-26 14:42:03.573585", + "features": { + "persistent": [ + "kraken" + ], + "optional": [] + }, + "mons": [ + { + "rank": 0, + "name": "a", + "addr": "127.0.0.1:40000\/0", + "public_addr": "127.0.0.1:40000\/0" + }, + { + "rank": 1, + "name": "b", + "addr": "127.0.0.1:40001\/0", + "public_addr": "127.0.0.1:40001\/0" + }, + { + "rank": 2, + "name": "c", + "addr": "127.0.0.1:40002\/0", + "public_addr": "127.0.0.1:40002\/0" + } + ] + } + } + +To see a dump of the monitor state, run the following command: .. prompt:: bash $ ceph mon dump -:: +This command returns output similar to the following: - dumped monmap epoch 2 - epoch 2 - fsid ba807e74-b64f-4b72-b43f-597dfe60ddbc - last_changed 2016-12-26 14:42:09.288066 - created 2016-12-26 14:42:03.573585 - 0: 127.0.0.1:40000/0 mon.a - 1: 127.0.0.1:40001/0 mon.b - 2: 127.0.0.1:40002/0 mon.c +:: + dumped monmap epoch 2 + epoch 2 + fsid ba807e74-b64f-4b72-b43f-597dfe60ddbc + last_changed 2016-12-26 14:42:09.288066 + created 2016-12-26 14:42:03.573585 + 0: 127.0.0.1:40000/0 mon.a + 1: 127.0.0.1:40001/0 mon.b + 2: 127.0.0.1:40002/0 mon.c diff --git a/ceph/doc/rados/operations/crush-map.rst b/ceph/doc/rados/operations/crush-map.rst index 54ad63130..39151e6d4 100644 --- a/ceph/doc/rados/operations/crush-map.rst +++ b/ceph/doc/rados/operations/crush-map.rst @@ -1043,6 +1043,8 @@ operations are served from the primary OSD of each PG. For erasure-coded pools, however, the speed of read operations can be increased by enabling **fast read** (see :ref:`pool-settings`). +.. _rados_ops_primary_affinity: + Primary Affinity ---------------- diff --git a/ceph/doc/rados/operations/erasure-code-profile.rst b/ceph/doc/rados/operations/erasure-code-profile.rst index 45b071f8a..947b34c1f 100644 --- a/ceph/doc/rados/operations/erasure-code-profile.rst +++ b/ceph/doc/rados/operations/erasure-code-profile.rst @@ -110,6 +110,8 @@ To remove an erasure code profile:: If the profile is referenced by a pool, the deletion will fail. +.. warning:: Removing an erasure code profile using ``osd erasure-code-profile rm`` does not automatically delete the associated CRUSH rule associated with the erasure code profile. It is recommended to manually remove the associated CRUSH rule using ``ceph osd crush rule remove {rule-name}`` to avoid unexpected behavior. + osd erasure-code-profile get ============================ diff --git a/ceph/doc/rados/operations/health-checks.rst b/ceph/doc/rados/operations/health-checks.rst index b97d4d64b..d52465602 100644 --- a/ceph/doc/rados/operations/health-checks.rst +++ b/ceph/doc/rados/operations/health-checks.rst @@ -1226,8 +1226,8 @@ The health check will be silenced for a specific pool only if POOL_APP_NOT_ENABLED ____________________ -A pool exists that contains one or more objects, but the pool has not been -tagged for use by a particular application. +A pool exists but the pool has not been tagged for use by a particular +application. To resolve this issue, tag the pool for use by an application. 
For example, if the pool is used by RBD, run the following command: @@ -1406,6 +1406,31 @@ other performance issue with the OSDs. The exact size of the snapshot trim queue is reported by the ``snaptrimq_len`` field of ``ceph pg ls -f json-detail``. +Stretch Mode +------------ + +INCORRECT_NUM_BUCKETS_STRETCH_MODE +__________________________________ + +Stretch mode currently supports only 2 dividing buckets with OSDs. This warning indicates +that the number of dividing buckets is not equal to 2 after stretch mode is enabled. +You can expect unpredictable failures and MON assertions until the condition is fixed. + +We encourage you to fix this by removing the additional dividing buckets or by bumping the +number of dividing buckets back to 2. + +UNEVEN_WEIGHTS_STRETCH_MODE +___________________________ + +The 2 dividing buckets must have equal weights when stretch mode is enabled. +This warning indicates that the 2 dividing buckets have uneven weights after +stretch mode is enabled. This is not immediately fatal; however, you can expect +Ceph to be confused when trying to process transitions between dividing buckets. + +We encourage you to fix this by making the weights even on both dividing buckets. +This can be done by making sure that the combined weight of the OSDs on each dividing +bucket is the same. + Miscellaneous ------------- diff --git a/ceph/doc/rados/operations/index.rst b/ceph/doc/rados/operations/index.rst index 2136918c7..15525c1d3 100644 --- a/ceph/doc/rados/operations/index.rst +++ b/ceph/doc/rados/operations/index.rst @@ -39,8 +39,9 @@ CRUSH algorithm. erasure-code cache-tiering placement-groups - balancer upmap + read-balancer + balancer crush-map crush-map-edits stretch-mode diff --git a/ceph/doc/rados/operations/monitoring-osd-pg.rst b/ceph/doc/rados/operations/monitoring-osd-pg.rst index 8090c7e0a..b0a6767a1 100644 --- a/ceph/doc/rados/operations/monitoring-osd-pg.rst +++ b/ceph/doc/rados/operations/monitoring-osd-pg.rst @@ -10,10 +10,11 @@ directly to specific OSDs. For this reason, tracking system faults requires finding the `placement group`_ (PG) and the underlying OSDs at the root of the problem. -.. tip:: A fault in one part of the cluster might prevent you from accessing a - particular object, but that doesn't mean that you are prevented from accessing other objects. - When you run into a fault, don't panic. Just follow the steps for monitoring - your OSDs and placement groups, and then begin troubleshooting. +.. tip:: A fault in one part of the cluster might prevent you from accessing a + particular object, but that doesn't mean that you are prevented from + accessing other objects. When you run into a fault, don't panic. Just + follow the steps for monitoring your OSDs and placement groups, and then + begin troubleshooting. Ceph is self-repairing. However, when problems persist, monitoring OSDs and placement groups will help you identify the problem. @@ -22,17 +23,20 @@ placement groups will help you identify the problem. Monitoring OSDs =============== -An OSD's status is as follows: it is either in the cluster (``in``) or out of the cluster -(``out``); likewise, it is either up and running (``up``) or down and not -running (``down``). If an OSD is ``up``, it can be either ``in`` the cluster -(if so, you can read and write data) or ``out`` of the cluster. If the OSD was previously -``in`` the cluster but was recently moved ``out`` of the cluster, Ceph will migrate its -PGs to other OSDs. If an OSD is ``out`` of the cluster, CRUSH will -not assign any PGs to that OSD.
If an OSD is ``down``, it should also be -``out``. - -.. note:: If an OSD is ``down`` and ``in``, then there is a problem and the cluster - is not in a healthy state. +An OSD is either *in* service (``in``) or *out* of service (``out``). An OSD is +either running and reachable (``up``), or it is not running and not reachable +(``down``). + +If an OSD is ``up``, it may be either ``in`` service (clients can read and +write data) or it is ``out`` of service. If the OSD was ``in`` but then due to +a failure or a manual action was set to the ``out`` state, Ceph will migrate +placement groups to the other OSDs to maintin the configured redundancy. + +If an OSD is ``out`` of service, CRUSH will not assign placement groups to it. +If an OSD is ``down``, it will also be ``out``. + +.. note:: If an OSD is ``down`` and ``in``, there is a problem and this + indicates that the cluster is not in a healthy state. .. ditaa:: diff --git a/ceph/doc/rados/operations/placement-groups.rst b/ceph/doc/rados/operations/placement-groups.rst index 2fd490730..dda4a0177 100644 --- a/ceph/doc/rados/operations/placement-groups.rst +++ b/ceph/doc/rados/operations/placement-groups.rst @@ -210,6 +210,11 @@ process. We recommend constraining each pool so that it belongs to only one root (that is, one OSD class) to silence the warning and ensure a successful scaling process. +.. _managing_bulk_flagged_pools: + +Managing pools that are flagged with ``bulk`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + If a pool is flagged ``bulk``, then the autoscaler starts the pool with a full complement of PGs and then scales down the number of PGs only if the usage ratio across the pool is uneven. However, if a pool is not flagged ``bulk``, @@ -659,6 +664,7 @@ In releases of Ceph that are Nautilus and later (inclusive), when the ``pg_num``. This process manifests as periods of remapping of PGs and of backfill, and is expected behavior and normal. +.. _rados_ops_pgs_get_pg_num: Get the Number of PGs ===================== diff --git a/ceph/doc/rados/operations/pools.rst b/ceph/doc/rados/operations/pools.rst index f53c60fe4..dda9e844e 100644 --- a/ceph/doc/rados/operations/pools.rst +++ b/ceph/doc/rados/operations/pools.rst @@ -46,12 +46,49 @@ operations. Do not create or manipulate pools with these names. List Pools ========== -To list your cluster's pools, run the following command: +There are multiple ways to get the list of pools in your cluster. + +To list just your cluster's pool names (good for scripting), execute: + +.. prompt:: bash $ + + ceph osd pool ls + +:: + + .rgw.root + default.rgw.log + default.rgw.control + default.rgw.meta + +To list your cluster's pools with the pool number, run the following command: .. prompt:: bash $ ceph osd lspools +:: + + 1 .rgw.root + 2 default.rgw.log + 3 default.rgw.control + 4 default.rgw.meta + +To list your cluster's pools with additional information, execute: + +.. 
prompt:: bash $ + + ceph osd pool ls detail + +:: + + pool 1 '.rgw.root' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 19 flags hashpspool stripe_width 0 application rgw read_balance_score 4.00 + pool 2 'default.rgw.log' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 21 flags hashpspool stripe_width 0 application rgw read_balance_score 4.00 + pool 3 'default.rgw.control' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 23 flags hashpspool stripe_width 0 application rgw read_balance_score 4.00 + pool 4 'default.rgw.meta' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 25 flags hashpspool stripe_width 0 pg_autoscale_bias 4 application rgw read_balance_score 4.00 + +To get even more information, you can execute this command with the ``--format`` (or ``-f``) option and the ``json``, ``json-pretty``, ``xml`` or ``xml-pretty`` value. + .. _createpool: Creating a Pool @@ -462,82 +499,6 @@ You may set values for the following keys: :Type: Integer :Valid Range: ``1`` sets flag, ``0`` unsets flag -.. _hit_set_type: - -.. describe:: hit_set_type - - :Description: Enables HitSet tracking for cache pools. - For additional information, see `Bloom Filter`_. - :Type: String - :Valid Settings: ``bloom``, ``explicit_hash``, ``explicit_object`` - :Default: ``bloom``. Other values are for testing. - -.. _hit_set_count: - -.. describe:: hit_set_count - - :Description: Determines the number of HitSets to store for cache pools. The - higher the value, the more RAM is consumed by the ``ceph-osd`` - daemon. - :Type: Integer - :Valid Range: ``1``. Agent doesn't handle > ``1`` yet. - -.. _hit_set_period: - -.. describe:: hit_set_period - - :Description: Determines the duration of a HitSet period (in seconds) for - cache pools. The higher the value, the more RAM is consumed - by the ``ceph-osd`` daemon. - :Type: Integer - :Example: ``3600`` (3600 seconds: one hour) - -.. _hit_set_fpp: - -.. describe:: hit_set_fpp - - :Description: Determines the probability of false positives for the - ``bloom`` HitSet type. For additional information, see `Bloom - Filter`_. - :Type: Double - :Valid Range: ``0.0`` - ``1.0`` - :Default: ``0.05`` - -.. _cache_target_dirty_ratio: - -.. describe:: cache_target_dirty_ratio - - :Description: Sets a flush threshold for the percentage of the cache pool - containing modified (dirty) objects. When this threshold is - reached, the cache-tiering agent will flush these objects to - the backing storage pool. - :Type: Double - :Default: ``.4`` - -.. _cache_target_dirty_high_ratio: - -.. describe:: cache_target_dirty_high_ratio - - :Description: Sets a flush threshold for the percentage of the cache pool - containing modified (dirty) objects. When this threshold is - reached, the cache-tiering agent will flush these objects to - the backing storage pool with a higher speed (as compared with - ``cache_target_dirty_ratio``). - :Type: Double - :Default: ``.6`` - -.. _cache_target_full_ratio: - -.. describe:: cache_target_full_ratio - - :Description: Sets an eviction threshold for the percentage of the cache - pool containing unmodified (clean) objects. When this - threshold is reached, the cache-tiering agent will evict - these objects from the cache pool. - - :Type: Double - :Default: ``.8`` - .. _target_max_bytes: .. 
describe:: target_max_bytes @@ -556,41 +517,6 @@ You may set values for the following keys: :Type: Integer :Example: ``1000000`` #1M objects - -.. describe:: hit_set_grade_decay_rate - - :Description: Sets the temperature decay rate between two successive - HitSets. - :Type: Integer - :Valid Range: 0 - 100 - :Default: ``20`` - -.. describe:: hit_set_search_last_n - - :Description: Count at most N appearances in HitSets. Used for temperature - calculation. - :Type: Integer - :Valid Range: 0 - hit_set_count - :Default: ``1`` - -.. _cache_min_flush_age: - -.. describe:: cache_min_flush_age - - :Description: Sets the time (in seconds) before the cache-tiering agent - flushes an object from the cache pool to the storage pool. - :Type: Integer - :Example: ``600`` (600 seconds: ten minutes) - -.. _cache_min_evict_age: - -.. describe:: cache_min_evict_age - - :Description: Sets the time (in seconds) before the cache-tiering agent - evicts an object from the cache pool. - :Type: Integer - :Example: ``1800`` (1800 seconds: thirty minutes) - .. _fast_read: .. describe:: fast_read @@ -702,56 +628,6 @@ You may get values from the following keys: :Description: See crush_rule_. -``hit_set_type`` - -:Description: See hit_set_type_. - -:Type: String -:Valid Settings: ``bloom``, ``explicit_hash``, ``explicit_object`` - - -``hit_set_count`` - -:Description: See hit_set_count_. - -:Type: Integer - - -``hit_set_period`` - -:Description: See hit_set_period_. - -:Type: Integer - - -``hit_set_fpp`` - -:Description: See hit_set_fpp_. - -:Type: Double - - -``cache_target_dirty_ratio`` - -:Description: See cache_target_dirty_ratio_. - -:Type: Double - - -``cache_target_dirty_high_ratio`` - -:Description: See cache_target_dirty_high_ratio_. - -:Type: Double - - -``cache_target_full_ratio`` - -:Description: See cache_target_full_ratio_. - -:Type: Double - - ``target_max_bytes`` :Description: See target_max_bytes_. @@ -766,20 +642,6 @@ You may get values from the following keys: :Type: Integer -``cache_min_flush_age`` - -:Description: See cache_min_flush_age_. - -:Type: Integer - - -``cache_min_evict_age`` - -:Description: See cache_min_evict_age_. - -:Type: Integer - - ``fast_read`` :Description: See fast_read_. @@ -876,6 +738,10 @@ Ceph will list pools and highlight the ``replicated size`` attribute. By default, Ceph creates two replicas of an object (a total of three copies, for a size of ``3``). +Managing pools that are flagged with ``--bulk`` +=============================================== +See :ref:`managing_bulk_flagged_pools`. + .. _pgcalc: https://old.ceph.com/pgcalc/ .. _Pool, PG and CRUSH Config Reference: ../../configuration/pool-pg-config-ref diff --git a/ceph/doc/rados/operations/read-balancer.rst b/ceph/doc/rados/operations/read-balancer.rst new file mode 100644 index 000000000..0833e4326 --- /dev/null +++ b/ceph/doc/rados/operations/read-balancer.rst @@ -0,0 +1,64 @@ +.. _read_balancer: + +======================================= +Operating the Read (Primary) Balancer +======================================= + +You might be wondering: How can I improve performance in my Ceph cluster? +One important data point you can check is the ``read_balance_score`` on each +of your replicated pools. + +This metric, available via ``ceph osd pool ls detail`` (see :ref:`rados_pools` +for more details) indicates read performance, or how balanced the primaries are +for each replicated pool. 
In most cases, if a ``read_balance_score`` is above 1 +(for instance, 1.5), this means that your pool has unbalanced primaries and that +you may want to try improving your read performance with the read balancer. + +Online Optimization +=================== + +At present, there is no online option for the read balancer. However, we plan to add +the read balancer as an option to the :ref:`balancer` in the next Ceph version +so it can be enabled to run automatically in the background like the upmap balancer. + +Offline Optimization +==================== + +Primaries are updated with an offline optimizer that is built into the +:ref:`osdmaptool`. + +#. Grab the latest copy of your osdmap: + + .. prompt:: bash $ + + ceph osd getmap -o om + +#. Run the optimizer: + + .. prompt:: bash $ + + osdmaptool om --read out.txt --read-pool [--vstart] + + It is highly recommended that you run the capacity balancer before running the + balancer to ensure optimal results. See :ref:`upmap` for details on how to balance + capacity in a cluster. + +#. Apply the changes: + + .. prompt:: bash $ + + source out.txt + + In the above example, the proposed changes are written to the output file + ``out.txt``. The commands in this procedure are normal Ceph CLI commands + that can be run in order to apply the changes to the cluster. + + If you are working in a vstart cluster, you may pass the ``--vstart`` parameter + as shown above so the CLI commands are formatted with the `./bin/` prefix. + + Note that any time the number of pgs changes (for instance, if the pg autoscaler [:ref:`pg-autoscaler`] + kicks in), you should consider rechecking the scores and rerunning the balancer if needed. + +To see some details about what the tool is doing, you can pass +``--debug-osd 10`` to ``osdmaptool``. To see even more details, pass +``--debug-osd 20`` to ``osdmaptool``. diff --git a/ceph/doc/rados/operations/upmap.rst b/ceph/doc/rados/operations/upmap.rst index 8cce1cf8e..8541680d8 100644 --- a/ceph/doc/rados/operations/upmap.rst +++ b/ceph/doc/rados/operations/upmap.rst @@ -1,7 +1,8 @@ .. _upmap: +======================================= Using pg-upmap -============== +======================================= In Luminous v12.2.z and later releases, there is a *pg-upmap* exception table in the OSDMap that allows the cluster to explicitly map specific PGs to @@ -11,6 +12,9 @@ in most cases, uniformly distribute PGs across OSDs. However, there is an important caveat when it comes to this new feature: it requires all clients to understand the new *pg-upmap* structure in the OSDMap. +Online Optimization +=================== + Enabling -------- @@ -40,17 +44,17 @@ command: ceph features -Balancer module +Balancer Module --------------- The `balancer` module for ``ceph-mgr`` will automatically balance the number of PGs per OSD. See :ref:`balancer` -Offline optimization --------------------- +Offline Optimization +==================== -Upmap entries are updated with an offline optimizer that is built into -``osdmaptool``. +Upmap entries are updated with an offline optimizer that is built into the +:ref:`osdmaptool`. #. 
Grab the latest copy of your osdmap: diff --git a/ceph/doc/rados/troubleshooting/community.rst b/ceph/doc/rados/troubleshooting/community.rst index f816584ae..c0d7be10c 100644 --- a/ceph/doc/rados/troubleshooting/community.rst +++ b/ceph/doc/rados/troubleshooting/community.rst @@ -2,12 +2,18 @@ The Ceph Community ==================== +Ceph-users email list +===================== + The Ceph community is an excellent source of information and help. For -operational issues with Ceph releases we recommend you `subscribe to the -ceph-users email list`_. When you no longer want to receive emails, you can -`unsubscribe from the ceph-users email list`_. +operational issues with Ceph we recommend that you `subscribe to the ceph-users +email list`_. When you no longer want to receive emails, you can `unsubscribe +from the ceph-users email list`_. + +Ceph-devel email list +===================== -You may also `subscribe to the ceph-devel email list`_. You should do so if +You can also `subscribe to the ceph-devel email list`_. You should do so if your issue is: - Likely related to a bug @@ -16,11 +22,14 @@ your issue is: - Related to your own builds If you no longer want to receive emails from the ``ceph-devel`` email list, you -may `unsubscribe from the ceph-devel email list`_. +can `unsubscribe from the ceph-devel email list`_. + +Ceph report +=========== -.. tip:: The Ceph community is growing rapidly, and community members can help - you if you provide them with detailed information about your problem. You - can attach the output of the ``ceph report`` command to help people understand your issues. +.. tip:: Community members can help you if you provide them with detailed + information about your problem. Attach the output of the ``ceph report`` + command to help people understand your issues. .. _subscribe to the ceph-devel email list: mailto:dev-join@ceph.io .. _unsubscribe from the ceph-devel email list: mailto:dev-leave@ceph.io diff --git a/ceph/doc/rados/troubleshooting/cpu-profiling.rst b/ceph/doc/rados/troubleshooting/cpu-profiling.rst index 159f7998d..b7fdd1d41 100644 --- a/ceph/doc/rados/troubleshooting/cpu-profiling.rst +++ b/ceph/doc/rados/troubleshooting/cpu-profiling.rst @@ -9,59 +9,72 @@ you can profile Ceph's CPU usage. See `Installing Oprofile`_ for details. Initializing oprofile ===================== -The first time you use ``oprofile`` you need to initialize it. Locate the -``vmlinux`` image corresponding to the kernel you are now running. :: +``oprofile`` must be initalized the first time it is used. Locate the +``vmlinux`` image that corresponds to the kernel you are running: - ls /boot - sudo opcontrol --init - sudo opcontrol --setup --vmlinux={path-to-image} --separate=library --callgraph=6 +.. prompt:: bash $ + + ls /boot + sudo opcontrol --init + sudo opcontrol --setup --vmlinux={path-to-image} --separate=library --callgraph=6 Starting oprofile ================= -To start ``oprofile`` execute the following command:: +Run the following command to start ``oprofile``: - opcontrol --start +.. prompt:: bash $ -Once you start ``oprofile``, you may run some tests with Ceph. + opcontrol --start Stopping oprofile ================= -To stop ``oprofile`` execute the following command:: +Run the following command to stop ``oprofile``: + +.. 
prompt:: bash $ - opcontrol --stop - - + opcontrol --stop + + Retrieving oprofile Results =========================== -To retrieve the top ``cmon`` results, execute the following command:: +Run the following command to retrieve the top ``cmon`` results: + +.. prompt:: bash $ + + opreport -gal ./cmon | less + - opreport -gal ./cmon | less - +Run the following command to retrieve the top ``cmon`` results, with call +graphs attached: -To retrieve the top ``cmon`` results with call graphs attached, execute the -following command:: +.. prompt:: bash $ - opreport -cal ./cmon | less - -.. important:: After reviewing results, you should reset ``oprofile`` before - running it again. Resetting ``oprofile`` removes data from the session - directory. + opreport -cal ./cmon | less + +.. important:: After you have reviewed the results, reset ``oprofile`` before + running it again. The act of resetting ``oprofile`` removes data from the + session directory. Resetting oprofile ================== -To reset ``oprofile``, execute the following command:: +Run the following command to reset ``oprofile``: - sudo opcontrol --reset +.. prompt:: bash $ + + sudo opcontrol --reset -.. important:: You should reset ``oprofile`` after analyzing data so that - you do not commingle results from different tests. +.. important:: Reset ``oprofile`` after analyzing data. This ensures that + results from prior tests do not get mixed in with the results of the current + test. .. _oprofile: http://oprofile.sourceforge.net/about/ .. _Installing Oprofile: ../../../dev/cpu-profiler + + diff --git a/ceph/doc/rados/troubleshooting/index.rst b/ceph/doc/rados/troubleshooting/index.rst index 80d14f3ce..b481ee1dc 100644 --- a/ceph/doc/rados/troubleshooting/index.rst +++ b/ceph/doc/rados/troubleshooting/index.rst @@ -2,10 +2,10 @@ Troubleshooting ================= -Ceph is still on the leading edge, so you may encounter situations that require -you to examine your configuration, modify your logging output, troubleshoot -monitors and OSDs, profile memory and CPU usage, and reach out to the -Ceph community for help. +You may encounter situations that require you to examine your configuration, +consult the documentation, modify your logging output, troubleshoot monitors +and OSDs, profile memory and CPU usage, and, in the last resort, reach out to +the Ceph community for help. .. toctree:: :maxdepth: 1 diff --git a/ceph/doc/rados/troubleshooting/memory-profiling.rst b/ceph/doc/rados/troubleshooting/memory-profiling.rst index 85146653b..8e58f2d76 100644 --- a/ceph/doc/rados/troubleshooting/memory-profiling.rst +++ b/ceph/doc/rados/troubleshooting/memory-profiling.rst @@ -2,16 +2,23 @@ Memory Profiling ================== -Ceph MON, OSD and MDS can generate heap profiles using -``tcmalloc``. To generate heap profiles, ensure you have -``google-perftools`` installed:: +Ceph Monitor, OSD, and MDS can report ``TCMalloc`` heap profiles. Install +``google-perftools`` if you want to generate these. Your OS distribution might +package this under a different name (for example, ``gperftools``), and your OS +distribution might use a different package manager. Run a command similar to +this one to install ``google-perftools``: - sudo apt-get install google-perftools +.. prompt:: bash -The profiler dumps output to your ``log file`` directory (i.e., -``/var/log/ceph``). See `Logging and Debugging`_ for details. 
-To view the profiler logs with Google's performance tools, execute the -following:: + sudo apt-get install google-perftools + +The profiler dumps output to your ``log file`` directory (``/var/log/ceph``). +See `Logging and Debugging`_ for details. + +To view the profiler logs with Google's performance tools, run the following +command: + +.. prompt:: bash google-pprof --text {path-to-daemon} {log-path/filename} @@ -48,9 +55,9 @@ For example:: 0.0 0.4% 99.2% 0.0 0.6% decode_message ... -Another heap dump on the same daemon will add another file. It is -convenient to compare to a previous heap dump to show what has grown -in the interval. For instance:: +Performing another heap dump on the same daemon creates another file. It is +convenient to compare the new file to a file created by a previous heap dump to +show what has grown in the interval. For example:: $ google-pprof --text --base out/osd.0.profile.0001.heap \ ceph-osd out/osd.0.profile.0003.heap @@ -60,112 +67,137 @@ in the interval. For instance:: 0.0 0.9% 97.7% 0.0 26.1% ReplicatedPG::do_op 0.0 0.8% 98.5% 0.0 0.8% __gnu_cxx::new_allocator::allocate -Refer to `Google Heap Profiler`_ for additional details. +See `Google Heap Profiler`_ for additional details. + +After you have installed the heap profiler, start your cluster and begin using +the heap profiler. You can enable or disable the heap profiler at runtime, or +ensure that it runs continuously. When running commands based on the examples +that follow, do the following: -Once you have the heap profiler installed, start your cluster and -begin using the heap profiler. You may enable or disable the heap -profiler at runtime, or ensure that it runs continuously. For the -following commandline usage, replace ``{daemon-type}`` with ``mon``, -``osd`` or ``mds``, and replace ``{daemon-id}`` with the OSD number or -the MON or MDS id. +#. replace ``{daemon-type}`` with ``mon``, ``osd`` or ``mds`` +#. replace ``{daemon-id}`` with the OSD number or the MON ID or the MDS ID Starting the Profiler --------------------- -To start the heap profiler, execute the following:: +To start the heap profiler, run a command of the following form: - ceph tell {daemon-type}.{daemon-id} heap start_profiler +.. prompt:: bash -For example:: + ceph tell {daemon-type}.{daemon-id} heap start_profiler - ceph tell osd.1 heap start_profiler +For example: -Alternatively the profile can be started when the daemon starts -running if the ``CEPH_HEAP_PROFILER_INIT=true`` variable is found in -the environment. +.. prompt:: bash + + ceph tell osd.1 heap start_profiler + +Alternatively, if the ``CEPH_HEAP_PROFILER_INIT=true`` variable is found in the +environment, the profile will be started when the daemon starts running. Printing Stats -------------- -To print out statistics, execute the following:: +To print out statistics, run a command of the following form: + +.. prompt:: bash + + ceph tell {daemon-type}.{daemon-id} heap stats - ceph tell {daemon-type}.{daemon-id} heap stats +For example: -For example:: +.. prompt:: bash - ceph tell osd.0 heap stats + ceph tell osd.0 heap stats -.. note:: Printing stats does not require the profiler to be running and does - not dump the heap allocation information to a file. +.. note:: The reporting of stats with this command does not require the + profiler to be running and does not dump the heap allocation information to + a file. 
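The ``heap`` commands shown above lend themselves to simple scripting. The
following is only a rough sketch (the daemon name ``osd.0``, the one-minute
interval, and the output path are illustrative assumptions, not Ceph
defaults) of how heap statistics might be sampled over time so that growth
can be reviewed later:

.. code-block:: bash

   #!/bin/bash
   # Append a timestamped heap-stats sample for osd.0 once a minute.
   # Interrupt with Ctrl-C when enough samples have been collected.
   while true; do
       date
       ceph tell osd.0 heap stats
       sleep 60
   done >> /tmp/osd.0-heap-stats.log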
Dumping Heap Information ------------------------ -To dump heap information, execute the following:: +To dump heap information, run a command of the following form: - ceph tell {daemon-type}.{daemon-id} heap dump +.. prompt:: bash -For example:: + ceph tell {daemon-type}.{daemon-id} heap dump - ceph tell mds.a heap dump +For example: -.. note:: Dumping heap information only works when the profiler is running. +.. prompt:: bash + + ceph tell mds.a heap dump + +.. note:: Dumping heap information works only when the profiler is running. Releasing Memory ---------------- -To release memory that ``tcmalloc`` has allocated but which is not being used by -the Ceph daemon itself, execute the following:: +To release memory that ``tcmalloc`` has allocated but which is not being used +by the Ceph daemon itself, run a command of the following form: + +.. prompt:: bash + + ceph tell {daemon-type}{daemon-id} heap release - ceph tell {daemon-type}{daemon-id} heap release +For example: -For example:: +.. prompt:: bash - ceph tell osd.2 heap release + ceph tell osd.2 heap release Stopping the Profiler --------------------- -To stop the heap profiler, execute the following:: +To stop the heap profiler, run a command of the following form: - ceph tell {daemon-type}.{daemon-id} heap stop_profiler +.. prompt:: bash -For example:: + ceph tell {daemon-type}.{daemon-id} heap stop_profiler - ceph tell osd.0 heap stop_profiler +For example: + +.. prompt:: bash + + ceph tell osd.0 heap stop_profiler .. _Logging and Debugging: ../log-and-debug .. _Google Heap Profiler: http://goog-perftools.sourceforge.net/doc/heap_profiler.html -Alternative ways for memory profiling -------------------------------------- +Alternative Methods of Memory Profiling +---------------------------------------- Running Massif heap profiler with Valgrind ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Massif heap profiler tool can be used with Valgrind to -measure how much heap memory is used and is good for -troubleshooting for example Ceph RadosGW. +The Massif heap profiler tool can be used with Valgrind to measure how much +heap memory is used. This method is well-suited to troubleshooting RadosGW. + +See the `Massif documentation +`_ for more information. + +Install Valgrind from the package manager for your distribution then start the +Ceph daemon you want to troubleshoot: -See the `Massif documentation `_ for -more information. +.. prompt:: bash -Install Valgrind from the package manager for your distribution -then start the Ceph daemon you want to troubleshoot:: + sudo -u ceph valgrind --max-threads=1024 --tool=massif /usr/bin/radosgw -f --cluster ceph --name NAME --setuser ceph --setgroup ceph - sudo -u ceph valgrind --max-threads=1024 --tool=massif /usr/bin/radosgw -f --cluster ceph --name NAME --setuser ceph --setgroup ceph +When this command has completed its run, a file with a name of the form +``massif.out.`` will be saved in your current working directory. To run +the command above, the user who runs it must have write permissions in the +current directory. -A file similar to ``massif.out.`` will be saved when it exits -in your current working directory. The user running the process above -must have write permissions in the current directory. +Run the ``ms_print`` command to get a graph and statistics from the collected +data in the ``massif.out.`` file: -You can then run the ``ms_print`` command to get a graph and statistics -from the collected data in the ``massif.out.`` file:: +.. 
prompt:: bash - ms_print massif.out.12345 + ms_print massif.out.12345 -This output is great for inclusion in a bug report. +The output of this command is helpful when submitting a bug report. diff --git a/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst b/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst index a95b49058..1170da7c3 100644 --- a/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst +++ b/ceph/doc/rados/troubleshooting/troubleshooting-mon.rst @@ -6,70 +6,78 @@ .. index:: monitor, high availability -If a cluster encounters monitor-related problems, this does not necessarily -mean that the cluster is in danger of going down. Even if multiple monitors are -lost, the cluster can still be up and running, as long as there are enough -surviving monitors to form a quorum. - -However serious your cluster's monitor-related problems might be, we recommend -that you take the following troubleshooting steps. - +Even if a cluster experiences monitor-related problems, the cluster is not +necessarily in danger of going down. If a cluster has lost multiple monitors, +it can still remain up and running as long as there are enough surviving +monitors to form a quorum. + +If your cluster is having monitor-related problems, we recommend that you +consult the following troubleshooting information. Initial Troubleshooting ======================= -**Are the monitors running?** - - First, make sure that the monitor (*mon*) daemon processes (``ceph-mon``) are - running. Sometimes Ceph admins either forget to start the mons or forget to - restart the mons after an upgrade. Checking for this simple oversight can - save hours of painstaking troubleshooting. It is also important to make sure - that the manager daemons (``ceph-mgr``) are running. Remember that typical - cluster configurations provide one ``ceph-mgr`` for each ``ceph-mon``. - - .. note:: Rook will not run more than two managers. - -**Can you reach the monitor nodes?** - - In certain rare cases, there may be ``iptables`` rules that block access to - monitor nodes or TCP ports. These rules might be left over from earlier - stress testing or rule development. To check for the presence of such rules, - SSH into the server and then try to connect to the monitor's ports - (``tcp/3300`` and ``tcp/6789``) using ``telnet``, ``nc``, or a similar tool. - -**Does the ``ceph status`` command run and receive a reply from the cluster?** - - If the ``ceph status`` command does receive a reply from the cluster, then the - cluster is up and running. The monitors will answer to a ``status`` request - only if there is a formed quorum. Confirm that one or more ``mgr`` daemons - are reported as running. Under ideal conditions, all ``mgr`` daemons will be - reported as running. - - - If the ``ceph status`` command does not receive a reply from the cluster, then - there are probably not enough monitors ``up`` to form a quorum. The ``ceph - -s`` command with no further options specified connects to an arbitrarily - selected monitor. In certain cases, however, it might be helpful to connect - to a specific monitor (or to several specific monitors in sequence) by adding - the ``-m`` flag to the command: for example, ``ceph status -m mymon1``. - +The first steps in the process of troubleshooting Ceph Monitors involve making +sure that the Monitors are running and that they are able to communicate with +the network and on the network. Follow the steps in this section to rule out +the simplest causes of Monitor malfunction. -**None of this worked. 
What now?** +#. **Make sure that the Monitors are running.** - If the above solutions have not resolved your problems, you might find it - helpful to examine each individual monitor in turn. Whether or not a quorum - has been formed, it is possible to contact each monitor individually and - request its status by using the ``ceph tell mon.ID mon_status`` command (here - ``ID`` is the monitor's identifier). - - Run the ``ceph tell mon.ID mon_status`` command for each monitor in the - cluster. For more on this command's output, see :ref:`Understanding - mon_status - `. - - There is also an alternative method: SSH into each monitor node and query the - daemon's admin socket. See :ref:`Using the Monitor's Admin - Socket`. + Make sure that the Monitor (*mon*) daemon processes (``ceph-mon``) are + running. It might be the case that the mons have not be restarted after an + upgrade. Checking for this simple oversight can save hours of painstaking + troubleshooting. + + It is also important to make sure that the manager daemons (``ceph-mgr``) + are running. Remember that typical cluster configurations provide one + Manager (``ceph-mgr``) for each Monitor (``ceph-mon``). + + .. note:: In releases prior to v1.12.5, Rook will not run more than two + managers. + +#. **Make sure that you can reach the Monitor nodes.** + + In certain rare cases, ``iptables`` rules might be blocking access to + Monitor nodes or TCP ports. These rules might be left over from earlier + stress testing or rule development. To check for the presence of such + rules, SSH into each Monitor node and use ``telnet`` or ``nc`` or a similar + tool to attempt to connect to each of the other Monitor nodes on ports + ``tcp/3300`` and ``tcp/6789``. + +#. **Make sure that the "ceph status" command runs and receives a reply from the cluster.** + + If the ``ceph status`` command receives a reply from the cluster, then the + cluster is up and running. Monitors answer to a ``status`` request only if + there is a formed quorum. Confirm that one or more ``mgr`` daemons are + reported as running. In a cluster with no deficiencies, ``ceph status`` + will report that all ``mgr`` daemons are running. + + If the ``ceph status`` command does not receive a reply from the cluster, + then there are probably not enough Monitors ``up`` to form a quorum. If the + ``ceph -s`` command is run with no further options specified, it connects + to an arbitrarily selected Monitor. In certain cases, however, it might be + helpful to connect to a specific Monitor (or to several specific Monitors + in sequence) by adding the ``-m`` flag to the command: for example, ``ceph + status -m mymon1``. + +#. **None of this worked. What now?** + + If the above solutions have not resolved your problems, you might find it + helpful to examine each individual Monitor in turn. Even if no quorum has + been formed, it is possible to contact each Monitor individually and + request its status by using the ``ceph tell mon.ID mon_status`` command + (here ``ID`` is the Monitor's identifier). + + Run the ``ceph tell mon.ID mon_status`` command for each Monitor in the + cluster. For more on this command's output, see :ref:`Understanding + mon_status + `. + + There is also an alternative method for contacting each individual Monitor: + SSH into each Monitor node and query the daemon's admin socket. See + :ref:`Using the Monitor's Admin + Socket`. .. 
_rados_troubleshoting_troubleshooting_mon_using_admin_socket: @@ -175,106 +183,136 @@ the quorum is formed by only two monitors, and *c* is in the quorum as a ``IP:PORT`` combination, the **lower** the rank. In this case, because ``127.0.0.1:6789`` is lower than the other two ``IP:PORT`` combinations, ``mon.a`` has the highest rank: namely, rank 0. - + Most Common Monitor Issues =========================== -Have Quorum but at least one Monitor is down ---------------------------------------------- +The Cluster Has Quorum but at Least One Monitor is Down +------------------------------------------------------- -When this happens, depending on the version of Ceph you are running, -you should be seeing something similar to:: +When the cluster has quorum but at least one monitor is down, ``ceph health +detail`` returns a message similar to the following:: $ ceph health detail [snip] mon.a (rank 0) addr 127.0.0.1:6789/0 is down (out of quorum) -How to troubleshoot this? +**How do I troubleshoot a Ceph cluster that has quorum but also has at least one monitor down?** - First, make sure ``mon.a`` is running. + #. Make sure that ``mon.a`` is running. - Second, make sure you are able to connect to ``mon.a``'s node from the - other mon nodes. Check the TCP ports as well. Check ``iptables`` and - ``nf_conntrack`` on all nodes and ensure that you are not - dropping/rejecting connections. + #. Make sure that you can connect to ``mon.a``'s node from the + other Monitor nodes. Check the TCP ports as well. Check ``iptables`` and + ``nf_conntrack`` on all nodes and make sure that you are not + dropping/rejecting connections. - If this initial troubleshooting doesn't solve your problems, then it's - time to go deeper. + If this initial troubleshooting doesn't solve your problem, then further + investigation is necessary. First, check the problematic monitor's ``mon_status`` via the admin socket as explained in `Using the monitor's admin socket`_ and `Understanding mon_status`_. - If the monitor is out of the quorum, its state should be one of ``probing``, - ``electing`` or ``synchronizing``. If it happens to be either ``leader`` or - ``peon``, then the monitor believes to be in quorum, while the remaining - cluster is sure it is not; or maybe it got into the quorum while we were - troubleshooting the monitor, so check you ``ceph status`` again just to make - sure. Proceed if the monitor is not yet in the quorum. - -What if the state is ``probing``? - - This means the monitor is still looking for the other monitors. Every time - you start a monitor, the monitor will stay in this state for some time while - trying to connect the rest of the monitors specified in the ``monmap``. The - time a monitor will spend in this state can vary. For instance, when on a - single-monitor cluster (never do this in production), the monitor will pass - through the probing state almost instantaneously. In a multi-monitor - cluster, the monitors will stay in this state until they find enough monitors - to form a quorum |---| this means that if you have 2 out of 3 monitors down, the - one remaining monitor will stay in this state indefinitely until you bring - one of the other monitors up. - - If you have a quorum the starting daemon should be able to find the - other monitors quickly, as long as they can be reached. If your - monitor is stuck probing and you have gone through with all the communication - troubleshooting, then there is a fair chance that the monitor is trying - to reach the other monitors on a wrong address. 
``mon_status`` outputs the - ``monmap`` known to the monitor: check if the other monitor's locations - match reality. If they don't, jump to - `Recovering a Monitor's Broken monmap`_; if they do, then it may be related - to severe clock skews amongst the monitor nodes and you should refer to - `Clock Skews`_ first, but if that doesn't solve your problem then it is - the time to prepare some logs and reach out to the community (please refer - to `Preparing your logs`_ on how to best prepare your logs). - - -What if state is ``electing``? - - This means the monitor is in the middle of an election. With recent Ceph - releases these typically complete quickly, but at times the monitors can - get stuck in what is known as an *election storm*. This can indicate - clock skew among the monitor nodes; jump to - `Clock Skews`_ for more information. If all your clocks are properly - synchronized, you should search the mailing lists and tracker. - This is not a state that is likely to persist and aside from - (*really*) old bugs there is not an obvious reason besides clock skews on - why this would happen. Worst case, if there are enough surviving mons, - down the problematic one while you investigate. - -What if state is ``synchronizing``? - - This means the monitor is catching up with the rest of the cluster in - order to join the quorum. Time to synchronize is a function of the size - of your monitor store and thus of cluster size and state, so if you have a - large or degraded cluster this may take a while. - - If you notice that the monitor jumps from ``synchronizing`` to - ``electing`` and then back to ``synchronizing``, then you do have a - problem: the cluster state may be advancing (i.e., generating new maps) - too fast for the synchronization process to keep up. This was a more common - thing in early days (Cuttlefish), but since then the synchronization process - has been refactored and enhanced to avoid this dynamic. If you experience - this in later versions please let us know via a bug tracker. And bring some logs - (see `Preparing your logs`_). - -What if state is ``leader`` or ``peon``? - - This should not happen: famous last words. If it does, however, it likely - has a lot to do with clock skew -- see `Clock Skews`_. If you are not - suffering from clock skew, then please prepare your logs (see - `Preparing your logs`_) and reach out to the community. + If the Monitor is out of the quorum, then its state will be one of the + following: ``probing``, ``electing`` or ``synchronizing``. If the state of + the Monitor is ``leader`` or ``peon``, then the Monitor believes itself to be + in quorum but the rest of the cluster believes that it is not in quorum. It + is possible that a Monitor that is in one of the ``probing``, ``electing``, + or ``synchronizing`` states has entered the quorum during the process of + troubleshooting. Check ``ceph status`` again to determine whether the Monitor + has entered quorum during your troubleshooting. If the Monitor remains out of + the quorum, then proceed with the investigations described in this section of + the documentation. + + +**What does it mean when a Monitor's state is ``probing``?** + + If ``ceph health detail`` shows that a Monitor's state is + ``probing``, then the Monitor is still looking for the other Monitors. Every + Monitor remains in this state for some time when it is started. When a + Monitor has connected to the other Monitors specified in the ``monmap``, it + ceases to be in the ``probing`` state. 
The amount of time that a Monitor is + in the ``probing`` state depends upon the parameters of the cluster of which + it is a part. For example, when a Monitor is a part of a single-monitor + cluster (never do this in production), the monitor passes through the probing + state almost instantaneously. In a multi-monitor cluster, the Monitors stay + in the ``probing`` state until they find enough monitors to form a quorum + |---| this means that if two out of three Monitors in the cluster are + ``down``, the one remaining Monitor stays in the ``probing`` state + indefinitely until you bring one of the other monitors up. + + If quorum has been established, then the Monitor daemon should be able to + find the other Monitors quickly, as long as they can be reached. If a Monitor + is stuck in the ``probing`` state and you have exhausted the procedures above + that describe the troubleshooting of communications between the Monitors, + then it is possible that the problem Monitor is trying to reach the other + Monitors at a wrong address. ``mon_status`` outputs the ``monmap`` that is + known to the monitor: determine whether the other Monitors' locations as + specified in the ``monmap`` match the locations of the Monitors in the + network. If they do not, see `Recovering a Monitor's Broken monmap`_. + If the locations of the Monitors as specified in the ``monmap`` match the + locations of the Monitors in the network, then the persistent + ``probing`` state could be related to severe clock skews amongst the monitor + nodes. See `Clock Skews`_. If the information in `Clock Skews`_ does not + bring the Monitor out of the ``probing`` state, then prepare your system logs + and ask the Ceph community for help. See `Preparing your logs`_ for + information about the proper preparation of logs. + + +**What does it mean when a Monitor's state is ``electing``?** + + If ``ceph health detail`` shows that a Monitor's state is ``electing``, the + monitor is in the middle of an election. Elections typically complete + quickly, but sometimes the monitors can get stuck in what is known as an + *election storm*. See :ref:`Monitor Elections ` for more + on monitor elections. + + The presence of election storm might indicate clock skew among the monitor + nodes. See `Clock Skews`_ for more information. + + If your clocks are properly synchronized, search the mailing lists and bug + tracker for issues similar to your issue. The ``electing`` state is not + likely to persist. In versions of Ceph after the release of Cuttlefish, there + is no obvious reason other than clock skew that explains why an ``electing`` + state would persist. + + It is possible to investigate the cause of a persistent ``electing`` state if + you put the problematic Monitor into a ``down`` state while you investigate. + This is possible only if there are enough surviving Monitors to form quorum. + +**What does it mean when a Monitor's state is ``synchronizing``?** + + If ``ceph health detail`` shows that the Monitor is ``synchronizing``, the + monitor is catching up with the rest of the cluster so that it can join the + quorum. The amount of time that it takes for the Monitor to synchronize with + the rest of the quorum is a function of the size of the cluster's monitor + store, the cluster's size, and the state of the cluster. Larger and degraded + clusters generally keep Monitors in the ``synchronizing`` state longer than + do smaller, new clusters. 
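  One rough way to confirm that a synchronizing Monitor is making progress is
  to poll its reported state and check that it eventually changes. This is
  only a sketch; ``mon.FOO`` is a placeholder for the ID of the Monitor being
  watched:

  .. prompt:: bash

     watch -n 10 "ceph tell mon.FOO mon_status | grep state"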
+ + A Monitor that changes its state from ``synchronizing`` to ``electing`` and + then back to ``synchronizing`` indicates a problem: the cluster state may be + advancing (that is, generating new maps) too fast for the synchronization + process to keep up with the pace of the creation of the new maps. This issue + presented more frequently prior to the Cuttlefish release than it does in + more recent releases, because the synchronization process has since been + refactored and enhanced to avoid this dynamic. If you experience this in + later versions, report the issue in the `Ceph bug tracker + <https://tracker.ceph.com>`_. Prepare and provide logs to substantiate any + bug you raise. See `Preparing your logs`_ for information about the proper + preparation of logs. + +**What does it mean when a Monitor's state is ``leader`` or ``peon``?** + + If ``ceph health detail`` shows that the Monitor is in the ``leader`` state + or in the ``peon`` state, it is likely that clock skew is present. Follow the + instructions in `Clock Skews`_. If you have followed those instructions and + ``ceph health detail`` still shows that the Monitor is in the ``leader`` + state or the ``peon`` state, report the issue in the `Ceph bug tracker + <https://tracker.ceph.com>`_. If you raise an issue, provide logs to + substantiate it. See `Preparing your logs`_ for information about the + proper preparation of logs. Recovering a Monitor's Broken ``monmap`` @@ -317,18 +355,21 @@ Scrap the monitor and redeploy Inject a monmap into the monitor - Usually the safest path. You should grab the monmap from the remaining - monitors and inject it into the monitor with the corrupted/lost monmap. - These are the basic steps: - 1. Is there a formed quorum? If so, grab the monmap from the quorum:: + Retrieve the ``monmap`` from the surviving monitors and inject it into the + monitor whose ``monmap`` is corrupted or lost. + + Implement this solution by carrying out the following procedure: + + 1. Is there a quorum of monitors? If so, retrieve the ``monmap`` from the + quorum:: $ ceph mon getmap -o /tmp/monmap - 2. No quorum? Grab the monmap directly from another monitor (this - assumes the monitor you are grabbing the monmap from has id ID-FOO - and has been stopped):: + 2. If there is no quorum, then retrieve the ``monmap`` directly from another + monitor that has been stopped (in this example, the other monitor has + the ID ``ID-FOO``):: + $ ceph-mon -i ID-FOO --extract-monmap /tmp/monmap @@ -340,97 +381,105 @@ Inject a monmap into the monitor 5. Start the monitor - Please keep in mind that the ability to inject monmaps is a powerful - feature that can cause havoc with your monitors if misused as it will - overwrite the latest, existing monmap kept by the monitor. - + .. warning:: Injecting ``monmaps`` can cause serious problems because doing + so will overwrite the latest existing ``monmap`` stored on the monitor. Be + careful! Clock Skews ------------- +----------- -Monitor operation can be severely affected by clock skew among the quorum's -mons, as the PAXOS consensus algorithm requires tight time alignment. -Skew can result in weird behavior with no obvious -cause. To avoid such issues, you must run a clock synchronization tool -on your monitor nodes: ``Chrony`` or the legacy ``ntpd``. Be sure to -configure the mon nodes with the `iburst` option and multiple peers: +The Paxos consensus algorithm requires close time synchronization, which means +that clock skew among the monitors in the quorum can have a serious effect on +monitor operation. 
The resulting behavior can be puzzling. To avoid this issue, +run a clock synchronization tool on your monitor nodes: for example, use +``Chrony`` or the legacy ``ntpd`` utility. Configure each monitor nodes so that +the `iburst` option is in effect and so that each monitor has multiple peers, +including the following: * Each other * Internal ``NTP`` servers * Multiple external, public pool servers -For good measure, *all* nodes in your cluster should also sync against -internal and external servers, and perhaps even your mons. ``NTP`` servers -should run on bare metal; VM virtualized clocks are not suitable for steady -timekeeping. Visit `https://www.ntp.org `_ for more info. Your -organization may already have quality internal ``NTP`` servers you can use. -Sources for ``NTP`` server appliances include: +.. note:: The ``iburst`` option sends a burst of eight packets instead of the + usual single packet, and is used during the process of getting two peers + into initial synchronization. + +Furthermore, it is advisable to synchronize *all* nodes in your cluster against +internal and external servers, and perhaps even against your monitors. Run +``NTP`` servers on bare metal: VM-virtualized clocks are not suitable for +steady timekeeping. See `https://www.ntp.org `_ for more +information about the Network Time Protocol (NTP). Your organization might +already have quality internal ``NTP`` servers available. Sources for ``NTP`` +server appliances include the following: * Microsemi (formerly Symmetricom) `https://microsemi.com `_ * EndRun `https://endruntechnologies.com `_ * Netburner `https://www.netburner.com `_ +Clock Skew Questions and Answers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -What's the maximum tolerated clock skew? - - By default the monitors will allow clocks to drift up to 0.05 seconds (50 ms). +**What's the maximum tolerated clock skew?** + By default, monitors allow clocks to drift up to a maximum of 0.05 seconds + (50 milliseconds). -Can I increase the maximum tolerated clock skew? +**Can I increase the maximum tolerated clock skew?** - The maximum tolerated clock skew is configurable via the - ``mon-clock-drift-allowed`` option, and - although you *CAN* you almost certainly *SHOULDN'T*. The clock skew mechanism - is in place because clock-skewed monitors are likely to misbehave. We, as - developers and QA aficionados, are comfortable with the current default - value, as it will alert the user before the monitors get out hand. Changing - this value may cause unforeseen effects on the - stability of the monitors and overall cluster health. + Yes, but we strongly recommend against doing so. The maximum tolerated clock + skew is configurable via the ``mon-clock-drift-allowed`` option, but it is + almost certainly a bad idea to make changes to this option. The clock skew + maximum is in place because clock-skewed monitors cannot be relied upon. The + current default value has proven its worth at alerting the user before the + monitors encounter serious problems. Changing this value might cause + unforeseen effects on the stability of the monitors and overall cluster + health. -How do I know there's a clock skew? +**How do I know whether there is a clock skew?** - The monitors will warn you via the cluster status ``HEALTH_WARN``. ``ceph - health detail`` or ``ceph status`` should show something like:: + The monitors will warn you via the cluster status ``HEALTH_WARN``. 
When clock + skew is present, the ``ceph health detail`` and ``ceph status`` commands + return an output resembling the following:: mon.c addr 10.10.0.1:6789/0 clock skew 0.08235s > max 0.05s (latency 0.0045s) - That means that ``mon.c`` has been flagged as suffering from a clock skew. - - On releases beginning with Luminous you can issue the ``ceph - time-sync-status`` command to check status. Note that the lead mon is - typically the one with the numerically lowest IP address. It will always - show ``0``: the reported offsets of other mons are relative to the lead mon, - not to any external reference source. + In this example, the monitor ``mon.c`` has been flagged as suffering from + clock skew. + In Luminous and later releases, it is possible to check for a clock skew by + running the ``ceph time-sync-status`` command. Note that the lead monitor + typically has the numerically lowest IP address. It will always show ``0``: + the reported offsets of other monitors are relative to the lead monitor, not + to any external reference source. -What should I do if there's a clock skew? +**What should I do if there is a clock skew?** - Synchronize your clocks. Running an NTP client may help. If you are already - using one and you hit this sort of issues, check if you are using some NTP - server remote to your network and consider hosting your own NTP server on - your network. This last option tends to reduce the amount of issues with - monitor clock skews. + Synchronize your clocks. Using an NTP client might help. However, if you + are already using an NTP client and you still encounter clock skew problems, + determine whether the NTP server that you are using is remote to your network + or instead hosted on your network. Hosting your own NTP servers tends to + mitigate clock skew problems. Client Can't Connect or Mount ------------------------------- +----------------------------- -Check your IP tables. Some OS install utilities add a ``REJECT`` rule to -``iptables``. The rule rejects all clients trying to connect to the host except -for ``ssh``. If your monitor host's IP tables have such a ``REJECT`` rule in -place, clients connecting from a separate node will fail to mount with a timeout -error. You need to address ``iptables`` rules that reject clients trying to -connect to Ceph daemons. For example, you would need to address rules that look -like this appropriately:: +Check your IP tables. Some operating-system install utilities add a ``REJECT`` +rule to ``iptables``. ``iptables`` rules will reject all clients other than +``ssh`` that try to connect to the host. If your monitor host's IP tables have +a ``REJECT`` rule in place, clients that are connecting from a separate node +will fail and will raise a timeout error. Any ``iptables`` rules that reject +clients trying to connect to Ceph daemons must be addressed. For example:: - REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + REJECT all -- anywhere anywhere reject-with icmp-host-prohibited -You may also need to add rules to IP tables on your Ceph hosts to ensure -that clients can access the ports associated with your Ceph monitors (i.e., port -6789 by default) and Ceph OSDs (i.e., 6800 through 7300 by default). For +It might also be necessary to add rules to iptables on your Ceph hosts to +ensure that clients are able to access the TCP ports associated with your Ceph +monitors (default: port 6789) and Ceph OSDs (default: 6800 through 7300). 
For example:: - iptables -A INPUT -m multiport -p tcp -s {ip-address}/{netmask} --dports 6789,6800:7300 -j ACCEPT + iptables -A INPUT -m multiport -p tcp -s {ip-address}/{netmask} --dports 6789,6800:7300 -j ACCEPT + Monitor Store Failures ====================== @@ -438,9 +487,9 @@ Monitor Store Failures Symptoms of store corruption ---------------------------- -Ceph monitor stores the :term:`Cluster Map` in a key/value store such as LevelDB. If -a monitor fails due to the key/value store corruption, following error messages -might be found in the monitor log:: +Ceph monitors store the :term:`Cluster Map` in a key-value store. If key-value +store corruption causes a monitor to fail, then the monitor log might contain +one of the following error messages:: Corruption: error in middle of record @@ -451,21 +500,26 @@ or:: Recovery using healthy monitor(s) --------------------------------- -If there are any survivors, we can always :ref:`replace ` the corrupted one with a -new one. After booting up, the new joiner will sync up with a healthy -peer, and once it is fully sync'ed, it will be able to serve the clients. +If there are surviving monitors, we can always :ref:`replace +` the corrupted monitor with a new one. After the +new monitor boots, it will synchronize with a healthy peer. After the new +monitor is fully synchronized, it will be able to serve clients. .. _mon-store-recovery-using-osds: Recovery using OSDs ------------------- -But what if all monitors fail at the same time? Since users are encouraged to -deploy at least three (and preferably five) monitors in a Ceph cluster, the chance of simultaneous -failure is rare. But unplanned power-downs in a data center with improperly -configured disk/fs settings could fail the underlying file system, and hence -kill all the monitors. In this case, we can recover the monitor store with the -information stored in OSDs. +Even if all monitors fail at the same time, it is possible to recover the +monitor store by using information stored in OSDs. You are encouraged to deploy +at least three (and preferably five) monitors in a Ceph cluster. In such a +deployment, complete monitor failure is unlikely. However, unplanned power loss +in a data center whose disk settings or filesystem settings are improperly +configured could cause the underlying filesystem to fail and this could kill +all of the monitors. In such a case, data in the OSDs can be used to recover +the monitors. The following is such a script and can be used to recover the +monitors: + .. code-block:: bash @@ -516,124 +570,142 @@ information stored in OSDs. mv $ms/store.db /var/lib/ceph/mon/mon.foo/store.db chown -R ceph:ceph /var/lib/ceph/mon/mon.foo/store.db -The steps above +This script performs the following steps: + +#. Collects the map from each OSD host. +#. Rebuilds the store. +#. Fills the entities in the keyring file with appropriate capabilities. +#. Replaces the corrupted store on ``mon.foo`` with the recovered copy. -#. collect the map from all OSD hosts, -#. then rebuild the store, -#. fill the entities in keyring file with appropriate caps -#. replace the corrupted store on ``mon.foo`` with the recovered copy. Known limitations ~~~~~~~~~~~~~~~~~ -Following information are not recoverable using the steps above: +The above recovery tool is unable to recover the following information: -- **some added keyrings**: all the OSD keyrings added using ``ceph auth add`` command - are recovered from the OSD's copy. 
And the ``client.admin`` keyring is imported - using ``ceph-monstore-tool``. But the MDS keyrings and other keyrings are missing - in the recovered monitor store. You might need to re-add them manually. +- **Certain added keyrings**: All of the OSD keyrings added using the ``ceph + auth add`` command are recovered from the OSD's copy, and the + ``client.admin`` keyring is imported using ``ceph-monstore-tool``. However, + the MDS keyrings and all other keyrings will be missing in the recovered + monitor store. You might need to manually re-add them. -- **creating pools**: If any RADOS pools were in the process of being creating, that state is lost. The recovery tool assumes that all pools have been created. If there are PGs that are stuck in the 'unknown' after the recovery for a partially created pool, you can force creation of the *empty* PG with the ``ceph osd force-create-pg`` command. Note that this will create an *empty* PG, so only do this if you know the pool is empty. - -- **MDS Maps**: the MDS maps are lost. +- **Creating pools**: If any RADOS pools were in the process of being created, + that state is lost. The recovery tool operates on the assumption that all + pools have already been created. If there are PGs that are stuck in the + 'unknown' state after the recovery for a partially created pool, you can + force creation of the *empty* PG by running the ``ceph osd force-create-pg`` + command. Note that this will create an *empty* PG, so take this action only + if you know the pool is empty. +- **MDS Maps**: The MDS maps are lost. Everything Failed! Now What? -============================= +============================ Reaching out for help ----------------------- +--------------------- -You can find us on IRC at #ceph and #ceph-devel at OFTC (server irc.oftc.net) -and on ``dev@ceph.io`` and ``ceph-users@lists.ceph.com``. Make -sure you have grabbed your logs and have them ready if someone asks: the faster -the interaction and lower the latency in response, the better chances everyone's -time is optimized. +You can find help on IRC in #ceph and #ceph-devel on OFTC (server +irc.oftc.net), or at ``dev@ceph.io`` and ``ceph-users@lists.ceph.com``. Make +sure that you have prepared your logs and that you have them ready upon +request. + +See https://ceph.io/en/community/connect/ for current (as of October 2023) +information on getting in contact with the upstream Ceph community. Preparing your logs ---------------------- +------------------- -Monitor logs are, by default, kept in ``/var/log/ceph/ceph-mon.FOO.log*``. We -may want them. However, your logs may not have the necessary information. If -you don't find your monitor logs at their default location, you can check -where they should be by running:: +The default location for monitor logs is ``/var/log/ceph/ceph-mon.FOO.log*``. +However, if they are not there, you can find their current location by running +the following command: - ceph-conf --name mon.FOO --show-config-value log_file +.. prompt:: bash -The amount of information in the logs are subject to the debug levels being -enforced by your configuration files. If you have not enforced a specific -debug level then Ceph is using the default levels and your logs may not -contain important information to track down you issue. -A first step in getting relevant information into your logs will be to raise -debug levels. In this case we will be interested in the information from the -monitor. 
-Similarly to what happens on other components, different parts of the monitor -will output their debug information on different subsystems. + ceph-conf --name mon.FOO --show-config-value log_file -You will have to raise the debug levels of those subsystems more closely -related to your issue. This may not be an easy task for someone unfamiliar -with troubleshooting Ceph. For most situations, setting the following options -on your monitors will be enough to pinpoint a potential source of the issue:: +The amount of information in the logs is determined by the debug levels in the +cluster's configuration files. If Ceph is using the default debug levels, then +your logs might be missing important information that would help the upstream +Ceph community address your issue. + +To make sure your monitor logs contain relevant information, you can raise +debug levels. Here we are interested in information from the monitors. As with +other components, the monitors have different parts that output their debug +information on different subsystems. + +If you are an experienced Ceph troubleshooter, we recommend raising the debug +levels of the most relevant subsystems. Of course, this approach might not be +easy for beginners. In most cases, however, enough information to address the +issue will be secured if the following debug levels are entered:: debug_mon = 10 debug_ms = 1 -If we find that these debug levels are not enough, there's a chance we may -ask you to raise them or even define other debug subsystems to obtain infos -from -- but at least we started off with some useful information, instead -of a massively empty log without much to go on with. +Sometimes these debug levels do not yield enough information. In such cases, +members of the upstream Ceph community might ask you to make additional changes +to these or to other debug levels. In any case, it is better for us to receive +at least some useful information than to receive an empty log. + Do I need to restart a monitor to adjust debug levels? ------------------------------------------------------ -No. You may do it in one of two ways: +No, restarting a monitor is not necessary. Debug levels may be adjusted by +using two different methods, depending on whether or not there is a quorum: -You have quorum +There is a quorum - Either inject the debug option into the monitor you want to debug:: + Either inject the debug option into the specific monitor that needs to + be debugged:: ceph tell mon.FOO config set debug_mon 10/10 - or into all monitors at once:: + Or inject it into all monitors at once:: ceph tell mon.* config set debug_mon 10/10 -No quorum - Use the monitor's admin socket and directly adjust the configuration - options:: +There is no quorum + + Use the admin socket of the specific monitor that needs to be debugged + and directly adjust the monitor's configuration options:: ceph daemon mon.FOO config set debug_mon 10/10 -Going back to default values is as easy as rerunning the above commands -using the debug level ``1/10`` instead. You can check your current -values using the admin socket and the following commands:: +To return the debug levels to their default values, run the above commands +using the debug level ``1/10`` rather than ``10/10``. To check a monitor's +current values, use the admin socket and run either of the following commands: - ceph daemon mon.FOO config show + .. prompt:: bash -or:: + ceph daemon mon.FOO config show + +or: + + .. 
prompt:: bash + + ceph daemon mon.FOO config get 'OPTION_NAME' - ceph daemon mon.FOO config get 'OPTION_NAME' -Reproduced the problem with appropriate debug levels. Now what? ----------------------------------------------------------------- +I Reproduced the problem with appropriate debug levels. Now what? +----------------------------------------------------------------- -Ideally you would send us only the relevant portions of your logs. -We realise that figuring out the corresponding portion may not be the -easiest of tasks. Therefore, we won't hold it to you if you provide the -full log, but common sense should be employed. If your log has hundreds -of thousands of lines, it may get tricky to go through the whole thing, -specially if we are not aware at which point, whatever your issue is, -happened. For instance, when reproducing, keep in mind to write down -current time and date and to extract the relevant portions of your logs -based on that. +We prefer that you send us only the portions of your logs that are relevant to +your monitor problems. Of course, it might not be easy for you to determine +which portions are relevant so we are willing to accept complete and +unabridged logs. However, we request that you avoid sending logs containing +hundreds of thousands of lines with no additional clarifying information. One +common-sense way of making our task easier is to write down the current time +and date when you are reproducing the problem and then extract portions of your +logs based on that information. -Finally, you should reach out to us on the mailing lists, on IRC or file -a new issue on the `tracker`_. +Finally, reach out to us on the mailing lists or IRC or Slack, or by filing a +new issue on the `tracker`_. .. _tracker: http://tracker.ceph.com/projects/ceph/issues/new diff --git a/ceph/doc/rados/troubleshooting/troubleshooting-osd.rst b/ceph/doc/rados/troubleshooting/troubleshooting-osd.rst index 5d0185b9b..035947d7e 100644 --- a/ceph/doc/rados/troubleshooting/troubleshooting-osd.rst +++ b/ceph/doc/rados/troubleshooting/troubleshooting-osd.rst @@ -2,438 +2,581 @@ Troubleshooting OSDs ====================== -Before troubleshooting your OSDs, first check your monitors and network. If -you execute ``ceph health`` or ``ceph -s`` on the command line and Ceph shows -``HEALTH_OK``, it means that the monitors have a quorum. -If you don't have a monitor quorum or if there are errors with the monitor -status, `address the monitor issues first <../troubleshooting-mon>`_. -Check your networks to ensure they -are running properly, because networks may have a significant impact on OSD -operation and performance. Look for dropped packets on the host side -and CRC errors on the switch side. +Before troubleshooting the cluster's OSDs, check the monitors +and the network. + +First, determine whether the monitors have a quorum. Run the ``ceph health`` +command or the ``ceph -s`` command and if Ceph shows ``HEALTH_OK`` then there +is a monitor quorum. + +If the monitors don't have a quorum or if there are errors with the monitor +status, address the monitor issues before proceeding by consulting the material +in `Troubleshooting Monitors <../troubleshooting-mon>`_. + +Next, check your networks to make sure that they are running properly. Networks +can have a significant impact on OSD operation and performance. Look for +dropped packets on the host side and CRC errors on the switch side. 
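The details of how to check the network vary by environment; the following is
a rough sketch using common Linux tools (the interface name ``eth0`` and the
placeholder ``{other-host}`` are examples only, not Ceph conventions):

.. code-block:: bash

   # Per-interface packet, drop, and error counters on an OSD host.
   ip -s link show eth0

   # Driver-level counters (including CRC errors), if the NIC exposes them.
   ethtool -S eth0 | grep -iE 'err|drop|crc'

   # Quick end-to-end reachability check against another cluster host.
   ping -c 10 {other-host}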
+ Obtaining Data About OSDs ========================= -A good first step in troubleshooting your OSDs is to obtain topology information in -addition to the information you collected while `monitoring your OSDs`_ -(e.g., ``ceph osd tree``). +When troubleshooting OSDs, it is useful to collect different kinds of +information about the OSDs. Some information comes from the practice of +`monitoring OSDs`_ (for example, by running the ``ceph osd tree`` command). +Additional information concerns the topology of your cluster, and is discussed +in the following sections. Ceph Logs --------- -If you haven't changed the default path, you can find Ceph log files at -``/var/log/ceph``:: +Ceph log files are stored under ``/var/log/ceph``. Unless the path has been +changed (or you are in a containerized environment that stores logs in a +different location), the log files can be listed by running the following +command: + +.. prompt:: bash - ls /var/log/ceph + ls /var/log/ceph + +If there is not enough log detail, change the logging level. To ensure that +Ceph performs adequately under high logging volume, see `Logging and +Debugging`_. -If you don't see enough log detail you can change your logging level. See -`Logging and Debugging`_ for details to ensure that Ceph performs adequately -under high logging volume. Admin Socket ------------ -Use the admin socket tool to retrieve runtime information. For details, list -the sockets for your Ceph daemons:: +Use the admin socket tool to retrieve runtime information. First, list the +sockets of Ceph's daemons by running the following command: - ls /var/run/ceph +.. prompt:: bash -Then, execute the following, replacing ``{daemon-name}`` with an actual -daemon (e.g., ``osd.0``):: + ls /var/run/ceph - ceph daemon osd.0 help +Next, run a command of the following form (replacing ``{daemon-name}`` with the +name of a specific daemon: for example, ``osd.0``): -Alternatively, you can specify a ``{socket-file}`` (e.g., something in ``/var/run/ceph``):: +.. prompt:: bash - ceph daemon {socket-file} help + ceph daemon {daemon-name} help -The admin socket, among other things, allows you to: +Alternatively, run the command with a ``{socket-file}`` specified (a "socket +file" is a specific file in ``/var/run/ceph``): -- List your configuration at runtime -- Dump historic operations -- Dump the operation priority queue state -- Dump operations in flight -- Dump perfcounters +.. prompt:: bash -Display Freespace ------------------ + ceph daemon {socket-file} help + +The admin socket makes many tasks possible, including: + +- Listing Ceph configuration at runtime +- Dumping historic operations +- Dumping the operation priority queue state +- Dumping operations in flight +- Dumping perfcounters -Filesystem issues may arise. To display your file system's free space, execute -``df``. :: +Display Free Space +------------------ + +Filesystem issues may arise. To display your filesystems' free space, run the +following command: + +.. prompt:: bash - df -h + df -h -Execute ``df --help`` for additional usage. +To see this command's supported syntax and options, run ``df --help``. I/O Statistics -------------- -Use `iostat`_ to identify I/O-related issues. :: +The `iostat`_ tool can be used to identify I/O-related issues. Run the +following command: + +.. prompt:: bash + + iostat -x - iostat -x Diagnostic Messages ------------------- -To retrieve diagnostic messages from the kernel, use ``dmesg`` with ``less``, ``more``, ``grep`` -or ``tail``. 
For example:: +To retrieve diagnostic messages from the kernel, run the ``dmesg`` command and +specify the output with ``less``, ``more``, ``grep``, or ``tail``. For +example: - dmesg | grep scsi +.. prompt:: bash -Stopping w/out Rebalancing -========================== + dmesg | grep scsi + +Stopping without Rebalancing +============================ + +It might be occasionally necessary to perform maintenance on a subset of your +cluster or to resolve a problem that affects a failure domain (for example, a +rack). However, when you stop OSDs for maintenance, you might want to prevent +CRUSH from automatically rebalancing the cluster. To avert this rebalancing +behavior, set the cluster to ``noout`` by running the following command: + +.. prompt:: bash -Periodically, you may need to perform maintenance on a subset of your cluster, -or resolve a problem that affects a failure domain (e.g., a rack). If you do not -want CRUSH to automatically rebalance the cluster as you stop OSDs for -maintenance, set the cluster to ``noout`` first:: + ceph osd set noout - ceph osd set noout +.. warning:: This is more a thought exercise offered for the purpose of giving + the reader a sense of failure domains and CRUSH behavior than a suggestion + that anyone in the post-Luminous world run ``ceph osd set noout``. When the + OSDs return to an ``up`` state, rebalancing will resume and the change + introduced by the ``ceph osd set noout`` command will be reverted. -On Luminous or newer releases it is safer to set the flag only on affected OSDs. -You can do this individually :: +In Luminous and later releases, however, it is a safer approach to flag only +affected OSDs. To add or remove a ``noout`` flag to a specific OSD, run a +command like the following: - ceph osd add-noout osd.0 - ceph osd rm-noout osd.0 +.. prompt:: bash -Or an entire CRUSH bucket at a time. Say you're going to take down -``prod-ceph-data1701`` to add RAM :: + ceph osd add-noout osd.0 + ceph osd rm-noout osd.0 - ceph osd set-group noout prod-ceph-data1701 +It is also possible to flag an entire CRUSH bucket. For example, if you plan to +take down ``prod-ceph-data1701`` in order to add RAM, you might run the +following command: -Once the flag is set you can stop the OSDs and any other colocated Ceph -services within the failure domain that requires maintenance work. :: +.. prompt:: bash - systemctl stop ceph\*.service ceph\*.target + ceph osd set-group noout prod-ceph-data1701 -.. note:: Placement groups within the OSDs you stop will become ``degraded`` - while you are addressing issues with within the failure domain. +After the flag is set, stop the OSDs and any other colocated +Ceph services within the failure domain that requires maintenance work:: -Once you have completed your maintenance, restart the OSDs and any other -daemons. If you rebooted the host as part of the maintenance, these should -come back on their own without intervention. :: + systemctl stop ceph\*.service ceph\*.target - sudo systemctl start ceph.target +.. note:: When an OSD is stopped, any placement groups within the OSD are + marked as ``degraded``. -Finally, you must unset the cluster-wide``noout`` flag:: +After the maintenance is complete, it will be necessary to restart the OSDs +and any other daemons that have stopped. However, if the host was rebooted as +part of the maintenance, they do not need to be restarted and will come back up +automatically. 
To restart OSDs or other daemons, use a command of the following +form: - ceph osd unset noout - ceph osd unset-group noout prod-ceph-data1701 +.. prompt:: bash + + sudo systemctl start ceph.target + +Finally, unset the ``noout`` flag as needed by running commands like the +following: + +.. prompt:: bash + + ceph osd unset noout + ceph osd unset-group noout prod-ceph-data1701 + +Many contemporary Linux distributions employ ``systemd`` for service +management. However, for certain operating systems (especially older ones) it +might be necessary to issue equivalent ``service`` or ``start``/``stop`` +commands. -Note that most Linux distributions that Ceph supports today employ ``systemd`` -for service management. For other or older operating systems you may need -to issue equivalent ``service`` or ``start``/``stop`` commands. .. _osd-not-running: OSD Not Running =============== -Under normal circumstances, simply restarting the ``ceph-osd`` daemon will -allow it to rejoin the cluster and recover. +Under normal conditions, restarting a ``ceph-osd`` daemon will allow it to +rejoin the cluster and recover. + An OSD Won't Start ------------------ -If you start your cluster and an OSD won't start, check the following: - -- **Configuration File:** If you were not able to get OSDs running from - a new installation, check your configuration file to ensure it conforms - (e.g., ``host`` not ``hostname``, etc.). - -- **Check Paths:** Check the paths in your configuration, and the actual - paths themselves for data and metadata (journals, WAL, DB). If you separate the OSD data from - the metadata and there are errors in your configuration file or in the - actual mounts, you may have trouble starting OSDs. If you want to store the - metadata on a separate block device, you should partition or LVM your - drive and assign one partition per OSD. - -- **Check Max Threadcount:** If you have a node with a lot of OSDs, you may be - hitting the default maximum number of threads (e.g., usually 32k), especially - during recovery. You can increase the number of threads using ``sysctl`` to - see if increasing the maximum number of threads to the maximum possible - number of threads allowed (i.e., 4194303) will help. For example:: - - sysctl -w kernel.pid_max=4194303 - - If increasing the maximum thread count resolves the issue, you can make it - permanent by including a ``kernel.pid_max`` setting in a file under ``/etc/sysctl.d`` or - within the master ``/etc/sysctl.conf`` file. For example:: - - kernel.pid_max = 4194303 - -- **Check ``nf_conntrack``:** This connection tracking and limiting system - is the bane of many production Ceph clusters, and can be insidious in that - everything is fine at first. As cluster topology and client workload - grow, mysterious and intermittent connection failures and performance - glitches manifest, becoming worse over time and at certain times of day. - Check ``syslog`` history for table fillage events. You can mitigate this - bother by raising ``nf_conntrack_max`` to a much higher value via ``sysctl``. - Be sure to raise ``nf_conntrack_buckets`` accordingly to - ``nf_conntrack_max / 4``, which may require action outside of ``sysctl`` e.g. - ``"echo 131072 > /sys/module/nf_conntrack/parameters/hashsize`` - More interdictive but fussier is to blacklist the associated kernel modules - to disable processing altogether. This is fragile in that the modules - vary among kernel versions, as does the order in which they must be listed. 
- Even when blacklisted there are situations in which ``iptables`` or ``docker`` - may activate connection tracking anyway, so a "set and forget" strategy for - the tunables is advised. On modern systems this will not consume appreciable - resources. - -- **Kernel Version:** Identify the kernel version and distribution you - are using. Ceph uses some third party tools by default, which may be - buggy or may conflict with certain distributions and/or kernel - versions (e.g., Google ``gperftools`` and ``TCMalloc``). Check the - `OS recommendations`_ and the release notes for each Ceph version - to ensure you have addressed any issues related to your kernel. - -- **Segment Fault:** If there is a segment fault, increase log levels - and start the problematic daemon(s) again. If segment faults recur, - search the Ceph bug tracker `https://tracker.ceph/com/projects/ceph `_ - and the ``dev`` and ``ceph-users`` mailing list archives `https://ceph.io/resources `_. - If this is truly a new and unique - failure, post to the ``dev`` email list and provide the specific Ceph - release being run, ``ceph.conf`` (with secrets XXX'd out), - your monitor status output and excerpts from your log file(s). +If the cluster has started but an OSD isn't starting, check the following: + +- **Configuration File:** If you were not able to get OSDs running from a new + installation, check your configuration file to ensure it conforms to the + standard (for example, make sure that it says ``host`` and not ``hostname``, + etc.). + +- **Check Paths:** Ensure that the paths specified in the configuration + correspond to the paths for data and metadata that actually exist (for + example, the paths to the journals, the WAL, and the DB). Separate the OSD + data from the metadata in order to see whether there are errors in the + configuration file and in the actual mounts. If so, these errors might + explain why OSDs are not starting. To store the metadata on a separate block + device, partition or LVM the drive and assign one partition per OSD. + +- **Check Max Threadcount:** If the cluster has a node with an especially high + number of OSDs, it might be hitting the default maximum number of threads + (usually 32,000). This is especially likely to happen during recovery. + Increasing the maximum number of threads to the maximum possible number of + threads allowed (4194303) might help with the problem. To increase the number + of threads to the maximum, run the following command: + + .. prompt:: bash + + sysctl -w kernel.pid_max=4194303 + + If this increase resolves the issue, you must make the increase permanent by + including a ``kernel.pid_max`` setting either in a file under + ``/etc/sysctl.d`` or within the master ``/etc/sysctl.conf`` file. For + example:: + + kernel.pid_max = 4194303 + +- **Check ``nf_conntrack``:** This connection-tracking and connection-limiting + system causes problems for many production Ceph clusters. The problems often + emerge slowly and subtly. As cluster topology and client workload grow, + mysterious and intermittent connection failures and performance glitches + occur more and more, especially at certain times of the day. To begin taking + the measure of your problem, check the ``syslog`` history for "table full" + events. One way to address this kind of problem is as follows: First, use the + ``sysctl`` utility to assign ``nf_conntrack_max`` a much higher value. 
Next,
+  raise the value of ``nf_conntrack_buckets`` so that ``nf_conntrack_buckets``
+  × 8 = ``nf_conntrack_max``; this action might require running commands
+  outside of ``sysctl`` (for example, ``echo 131072 >
+  /sys/module/nf_conntrack/parameters/hashsize``). Another way to address the
+  problem is to blacklist the associated kernel modules in order to disable
+  processing altogether. This approach is powerful, but fragile. The modules
+  and the order in which the modules must be listed can vary among kernel
+  versions. Even when blacklisted, ``iptables`` and ``docker`` might sometimes
+  activate connection tracking anyway, so we advise a "set and forget" strategy
+  for the tunables. On modern systems, this approach will not consume
+  appreciable resources.
+
+- **Kernel Version:** Identify the kernel version and distribution that are in
+  use. By default, Ceph uses third-party tools that might be buggy or come into
+  conflict with certain distributions or kernel versions (for example, Google's
+  ``gperftools`` and ``TCMalloc``). Check the `OS recommendations`_ and the
+  release notes for each Ceph version in order to make sure that you have
+  addressed any issues related to your kernel.
+
+- **Segment Fault:** If there is a segment fault, increase log levels and
+  restart the problematic daemon(s). If segment faults recur, search the Ceph
+  bug tracker `https://tracker.ceph.com/projects/ceph
+  `_ and the ``dev`` and
+  ``ceph-users`` mailing list archives `https://ceph.io/resources
+  `_ to see if others have experienced and reported
+  these issues. If this truly is a new and unique failure, post to the ``dev``
+  email list and provide the following information: the specific Ceph release
+  being run, ``ceph.conf`` (with secrets XXX'd out), your monitor status
+  output, and excerpts from your log file(s).
+
 An OSD Failed
 -------------
 
-When a ``ceph-osd`` process dies, surviving ``ceph-osd`` daemons will report
-to the mons that it appears down, which will in turn surface the new status
-via the ``ceph health`` command::
+When an OSD fails, this means that a ``ceph-osd`` process is unresponsive or
+has died and that the corresponding OSD has been marked ``down``. Surviving
+``ceph-osd`` daemons will report to the monitors that the OSD appears to be
+down, and a new status will be visible in the output of the ``ceph health``
+command, as in the following example:
+
+.. prompt:: bash
+
+   ceph health
+
+::
+
+   HEALTH_WARN 1/3 in osds are down
+
+This health alert is raised whenever there are one or more OSDs marked ``in``
+and ``down``. To see which OSDs are ``down``, add ``detail`` to the command as in
+the following example:
+
+.. prompt:: bash
+
+   ceph health detail
+
+::
 
- ceph health
- HEALTH_WARN 1/3 in osds are down
+   HEALTH_WARN 1/3 in osds are down
+   osd.0 is down since epoch 23, last address 192.168.106.220:6800/11080
 
-Specifically, you will get a warning whenever there are OSDs marked ``in``
-and ``down``.  You can identify which are ``down`` with::
+Alternatively, run the following command:
 
- ceph health detail
- HEALTH_WARN 1/3 in osds are down
- osd.0 is down since epoch 23, last address 192.168.106.220:6800/11080
+.. prompt:: bash
 
-or ::
+   ceph osd tree down
 
- ceph osd tree down
+If there is a drive failure or another fault that is preventing a given
+``ceph-osd`` daemon from functioning or restarting, then there should be an
+error message present in its log file under ``/var/log/ceph``.
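+
+For example, one quick way to look for recent problems in the log of ``osd.0``
+(a sketch only: this assumes a non-containerized deployment that uses the
+default log path, so adjust the daemon name and path for your environment) is
+to run something like the following:
+
+.. prompt:: bash
+
+   grep -iE 'error|fail|assert' /var/log/ceph/ceph-osd.0.log | tail -n 20
+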
-If there is a drive
-failure or other fault preventing ``ceph-osd`` from functioning or
-restarting, an error message should be present in its log file under
-``/var/log/ceph``.
+If the ``ceph-osd`` daemon stopped because of a heartbeat failure or a
+``suicide timeout`` error, then the underlying drive or filesystem might be
+unresponsive. Check ``dmesg`` output and `syslog` output for drive errors or
+kernel errors. It might be necessary to specify certain flags (for example,
+``dmesg -T`` to see human-readable timestamps) in order to avoid mistaking old
+errors for new errors.
 
-If the daemon stopped because of a heartbeat failure or ``suicide timeout``,
-the underlying drive or filesystem may be unresponsive. Check ``dmesg``
-and `syslog` output for drive or other kernel errors. You may need to
-specify something like ``dmesg -T`` to get timestamps, otherwise it's
-easy to mistake old errors for new.
+If an entire host's OSDs are ``down``, check to see if there is a network
+error or a hardware issue with the host.
+
+If the OSD problem is the result of a software error (for example, a failed
+assertion or another unexpected error), search for reports of the issue in the
+`bug tracker `_, the `dev mailing list
+archives `_, and the
+`ceph-users mailing list archives
+`_. If there is no
+clear fix or existing bug, then :ref:`report the problem to the ceph-devel
+email list `.
 
-If the problem is a software error (failed assertion or other
-unexpected error), search the archives and tracker as above, and
-report it to the `ceph-devel`_ email list if there's no clear fix or
-existing bug.
 
 .. _no-free-drive-space:
 
 No Free Drive Space
 -------------------
 
-Ceph prevents you from writing to a full OSD so that you don't lose data.
-In an operational cluster, you should receive a warning when your cluster's OSDs
-and pools approach the full ratio. The ``mon_osd_full_ratio`` defaults to
-``0.95``, or 95% of capacity before it stops clients from writing data.
-The ``mon_osd_backfillfull_ratio`` defaults to ``0.90``, or 90 % of
-capacity above which backfills will not start. The
-OSD nearfull ratio defaults to ``0.85``, or 85% of capacity
-when it generates a health warning.
+If an OSD is full, Ceph prevents data loss by ensuring that no new data is
+written to the OSD. In a properly running cluster, health checks are raised
+when the cluster's OSDs and pools approach certain "fullness" ratios. The
+``mon_osd_full_ratio`` threshold defaults to ``0.95`` (or 95% of capacity):
+this is the point above which clients are prevented from writing data. The
+``mon_osd_backfillfull_ratio`` threshold defaults to ``0.90`` (or 90% of
+capacity): this is the point above which backfills will not start. The
+``mon_osd_nearfull_ratio`` threshold defaults to ``0.85`` (or 85% of capacity):
+this is the point at which the cluster raises the ``OSD_NEARFULL`` health
+check.
+
+OSDs within a cluster will vary in how much data is allocated to them by Ceph.
+To check "fullness" by displaying data utilization for every OSD, run the
+following command:
+
+.. prompt:: bash
+
+   ceph osd df
+
+To check "fullness" by displaying a cluster's overall data usage and data
+distribution among pools, run the following command:
+
+.. prompt:: bash
+
+   ceph df
+
+When examining the output of the ``ceph df`` command, pay special attention to
+the **most full** OSDs, as opposed to the percentage of raw space used. If a
+single outlier OSD becomes full, all writes to this OSD's pool might fail as a
+result.
When ``ceph df`` reports the space available to a pool, it considers +the ratio settings relative to the *most full* OSD that is part of the pool. To +flatten the distribution, two approaches are available: (1) Using the +``reweight-by-utilization`` command to progressively move data from excessively +full OSDs or move data to insufficiently full OSDs, and (2) in later revisions +of Luminous and subsequent releases, exploiting the ``ceph-mgr`` ``balancer`` +module to perform the same task automatically. + +To adjust the "fullness" ratios, run a command or commands of the following +form: -Note that individual OSDs within a cluster will vary in how much data Ceph -allocates to them. This utilization can be displayed for each OSD with :: +.. prompt:: bash - ceph osd df + ceph osd set-nearfull-ratio + ceph osd set-full-ratio + ceph osd set-backfillfull-ratio -Overall cluster / pool fullness can be checked with :: +Sometimes full cluster issues arise because an OSD has failed. This can happen +either because of a test or because the cluster is small, very full, or +unbalanced. When an OSD or node holds an excessive percentage of the cluster's +data, component failures or natural growth can result in the ``nearfull`` and +``full`` ratios being exceeded. When testing Ceph's resilience to OSD failures +on a small cluster, it is advised to leave ample free disk space and to +consider temporarily lowering the OSD ``full ratio``, OSD ``backfillfull +ratio``, and OSD ``nearfull ratio``. - ceph df +The "fullness" status of OSDs is visible in the output of the ``ceph health`` +command, as in the following example: -Pay close attention to the **most full** OSDs, not the percentage of raw space -used as reported by ``ceph df``. It only takes one outlier OSD filling up to -fail writes to its pool. The space available to each pool as reported by -``ceph df`` considers the ratio settings relative to the *most full* OSD that -is part of a given pool. The distribution can be flattened by progressively -moving data from overfull or to underfull OSDs using the ``reweight-by-utilization`` -command. With Ceph releases beginning with later revisions of Luminous one can also -exploit the ``ceph-mgr`` ``balancer`` module to perform this task automatically -and rather effectively. +.. prompt:: bash -The ratios can be adjusted: + ceph health :: - ceph osd set-nearfull-ratio - ceph osd set-full-ratio - ceph osd set-backfillfull-ratio + HEALTH_WARN 1 nearfull osd(s) -Full cluster issues can arise when an OSD fails either as a test or organically -within small and/or very full or unbalanced cluster. When an OSD or node -holds an outsize percentage of the cluster's data, the ``nearfull`` and ``full`` -ratios may be exceeded as a result of component failures or even natural growth. -If you are testing how Ceph reacts to OSD failures on a small -cluster, you should leave ample free disk space and consider temporarily -lowering the OSD ``full ratio``, OSD ``backfillfull ratio`` and -OSD ``nearfull ratio`` +For details, add the ``detail`` command as in the following example: -Full ``ceph-osds`` will be reported by ``ceph health``:: +.. 
prompt:: bash - ceph health - HEALTH_WARN 1 nearfull osd(s) + ceph health detail -Or:: +:: + + HEALTH_ERR 1 full osd(s); 1 backfillfull osd(s); 1 nearfull osd(s) + osd.3 is full at 97% + osd.4 is backfill full at 91% + osd.2 is near full at 87% - ceph health detail - HEALTH_ERR 1 full osd(s); 1 backfillfull osd(s); 1 nearfull osd(s) - osd.3 is full at 97% - osd.4 is backfill full at 91% - osd.2 is near full at 87% +To address full cluster issues, it is recommended to add capacity by adding +OSDs. Adding new OSDs allows the cluster to redistribute data to newly +available storage. Search for ``rados bench`` orphans that are wasting space. -The best way to deal with a full cluster is to add capacity via new OSDs, enabling -the cluster to redistribute data to newly available storage. +If a legacy Filestore OSD cannot be started because it is full, it is possible +to reclaim space by deleting a small number of placement group directories in +the full OSD. -If you cannot start a legacy Filestore OSD because it is full, you may reclaim -some space deleting a few placement group directories in the full OSD. +.. important:: If you choose to delete a placement group directory on a full + OSD, **DO NOT** delete the same placement group directory on another full + OSD. **OTHERWISE YOU WILL LOSE DATA**. You **MUST** maintain at least one + copy of your data on at least one OSD. Deleting placement group directories + is a rare and extreme intervention. It is not to be undertaken lightly. -.. important:: If you choose to delete a placement group directory on a full OSD, - **DO NOT** delete the same placement group directory on another full OSD, or - **YOU WILL LOSE DATA**. You **MUST** maintain at least one copy of your data on - at least one OSD. This is a rare and extreme intervention, and is not to be - undertaken lightly. +See `Monitor Config Reference`_ for more information. -See `Monitor Config Reference`_ for additional details. OSDs are Slow/Unresponsive ========================== -A common issue involves slow or unresponsive OSDs. Ensure that you -have eliminated other troubleshooting possibilities before delving into OSD -performance issues. For example, ensure that your network(s) is working properly -and your OSDs are running. Check to see if OSDs are throttling recovery traffic. +OSDs are sometimes slow or unresponsive. When troubleshooting this common +problem, it is advised to eliminate other possibilities before investigating +OSD performance issues. For example, be sure to confirm that your network(s) +are working properly, to verify that your OSDs are running, and to check +whether OSDs are throttling recovery traffic. + +.. tip:: In pre-Luminous releases of Ceph, ``up`` and ``in`` OSDs were + sometimes not available or were otherwise slow because recovering OSDs were + consuming system resources. Newer releases provide better recovery handling + by preventing this phenomenon. -.. tip:: Newer versions of Ceph provide better recovery handling by preventing - recovering OSDs from using up system resources so that ``up`` and ``in`` - OSDs are not available or are otherwise slow. Networking Issues ----------------- -Ceph is a distributed storage system, so it relies upon networks for OSD peering -and replication, recovery from faults, and periodic heartbeats. Networking -issues can cause OSD latency and flapping OSDs. See `Flapping OSDs`_ for -details. 
+As a distributed storage system, Ceph relies upon networks for OSD peering and
+replication, recovery from faults, and periodic heartbeats. Networking issues
+can cause OSD latency and flapping OSDs. For more information, see `Flapping
+OSDs`_.
+
+To make sure that Ceph processes and Ceph-dependent processes are connected and
+listening, run the following commands:
 
-Ensure that Ceph processes and Ceph-dependent processes are connected and/or
-listening. ::
+.. prompt:: bash
 
- netstat -a | grep ceph
- netstat -l | grep ceph
- sudo netstat -p | grep ceph
+   netstat -a | grep ceph
+   netstat -l | grep ceph
+   sudo netstat -p | grep ceph
 
-Check network statistics. ::
+To check network statistics, run the following command:
 
- netstat -s
+.. prompt:: bash
+
+   netstat -s
 
 Drive Configuration
 -------------------
 
-A SAS or SATA storage drive should only house one OSD; NVMe drives readily
-handle two or more. Read and write throughput can bottleneck if other processes
-share the drive, including journals / metadata, operating systems, Ceph monitors,
-`syslog` logs, other OSDs, and non-Ceph processes.
+A SAS or SATA storage drive should house only one OSD, but an NVMe drive can
+easily house two or more. However, it is possible for read and write throughput
+to bottleneck if other processes share the drive. Such processes include:
+journals / metadata, operating systems, Ceph monitors, ``syslog`` logs, other
+OSDs, and non-Ceph processes.
 
-Ceph acknowledges writes *after* journaling, so fast SSDs are an
-attractive option to accelerate the response time--particularly when
-using the ``XFS`` or ``ext4`` file systems for legacy Filestore OSDs.
-By contrast, the ``Btrfs``
-file system can write and journal simultaneously.  (Note, however, that
-we recommend against using ``Btrfs`` for production deployments.)
+Because Ceph acknowledges writes *after* journaling, fast SSDs are an
+attractive option for accelerating response time -- particularly when using the
+``XFS`` or ``ext4`` filesystems for legacy FileStore OSDs. By contrast, the
+``Btrfs`` file system can write and journal simultaneously. (However, use of
+``Btrfs`` is not recommended for production deployments.)
 
 .. note:: Partitioning a drive does not change its total throughput or
-   sequential read/write limits. Running a journal in a separate partition
-   may help, but you should prefer a separate physical drive.
+   sequential read/write limits. Throughput might be improved somewhat by
+   running a journal in a separate partition, but it is better still to run
+   such a journal on a separate physical drive.
+
+.. warning:: Reef does not support FileStore. Releases after Reef do not
+   support FileStore. Any information that mentions FileStore is pertinent only
+   to the Quincy release of Ceph and to releases prior to Quincy.
+
 
 Bad Sectors / Fragmented Disk
 -----------------------------
 
-Check your drives for bad blocks, fragmentation, and other errors that can cause
-performance to drop substantially. Invaluable tools include ``dmesg``, ``syslog``
-logs, and ``smartctl`` (from the ``smartmontools`` package).
+Check your drives for bad blocks, fragmentation, and other errors that can
+cause significantly degraded performance. Tools that are useful in checking for
+drive errors include ``dmesg``, ``syslog`` logs, and ``smartctl`` (found in the
+``smartmontools`` package).
+
+.. note:: ``smartmontools`` 7.0 and later provides NVMe stat passthrough and
+   JSON output.
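+
+As a quick sketch of such a check (substitute your own device paths, for
+example ``/dev/sda`` for a SAS/SATA drive or ``/dev/nvme0`` for an NVMe
+device), the following commands print a drive's SMART health summary and, with
+``smartmontools`` 7.0 or later, the same information as JSON:
+
+.. prompt:: bash
+
+   sudo smartctl -a /dev/sda
+   sudo smartctl -j -a /dev/nvme0
+
+The JSON form is convenient when collecting drive health across many OSD hosts
+for later comparison.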
+ Co-resident Monitors/OSDs ------------------------- -Monitors are relatively lightweight processes, but they issue lots of -``fsync()`` calls, -which can interfere with other workloads, particularly if monitors run on the -same drive as an OSD. Additionally, if you run monitors on the same host as -OSDs, you may incur performance issues related to: +Although monitors are relatively lightweight processes, performance issues can +result when monitors are run on the same host machine as an OSD. Monitors issue +many ``fsync()`` calls and this can interfere with other workloads. The danger +of performance issues is especially acute when the monitors are co-resident on +the same storage drive as an OSD. In addition, if the monitors are running an +older kernel (pre-3.0) or a kernel with no ``syncfs(2)`` syscall, then multiple +OSDs running on the same host might make so many commits as to undermine each +other's performance. This problem sometimes results in what is called "the +bursty writes". -- Running an older kernel (pre-3.0) -- Running a kernel with no ``syncfs(2)`` syscall. - -In these cases, multiple OSDs running on the same host can drag each other down -by doing lots of commits. That often leads to the bursty writes. Co-resident Processes --------------------- -Spinning up co-resident processes (convergence) such as a cloud-based solution, virtual -machines and other applications that write data to Ceph while operating on the -same hardware as OSDs can introduce significant OSD latency. Generally, we -recommend optimizing hosts for use with Ceph and using other hosts for other -processes. The practice of separating Ceph operations from other applications -may help improve performance and may streamline troubleshooting and maintenance. +Significant OSD latency can result from processes that write data to Ceph (for +example, cloud-based solutions and virtual machines) while operating on the +same hardware as OSDs. For this reason, making such processes co-resident with +OSDs is not generally recommended. Instead, the recommended practice is to +optimize certain hosts for use with Ceph and use other hosts for other +processes. This practice of separating Ceph operations from other applications +might help improve performance and might also streamline troubleshooting and +maintenance. + +Running co-resident processes on the same hardware is sometimes called +"convergence". When using Ceph, engage in convergence only with expertise and +after consideration. + Logging Levels -------------- -If you turned logging levels up to track an issue and then forgot to turn -logging levels back down, the OSD may be putting a lot of logs onto the disk. If -you intend to keep logging levels high, you may consider mounting a drive to the -default path for logging (i.e., ``/var/log/ceph/$cluster-$name.log``). +Performance issues can result from high logging levels. Operators sometimes +raise logging levels in order to track an issue and then forget to lower them +afterwards. In such a situation, OSDs might consume valuable system resources to +write needlessly verbose logs onto the disk. Anyone who does want to use high logging +levels is advised to consider mounting a drive to the default path for logging +(for example, ``/var/log/ceph/$cluster-$name.log``). Recovery Throttling ------------------- Depending upon your configuration, Ceph may reduce recovery rates to maintain -performance or it may increase recovery rates to the point that recovery -impacts OSD performance. 
Check to see if the OSD is recovering. +client or OSD performance, or it may increase recovery rates to the point that +recovery impacts client or OSD performance. Check to see if the client or OSD +is recovering. + Kernel Version -------------- -Check the kernel version you are running. Older kernels may not receive -new backports that Ceph depends upon for better performance. +Check the kernel version that you are running. Older kernels may lack updates +that improve Ceph performance. + Kernel Issues with SyncFS ------------------------- -Try running one OSD per host to see if performance improves. Old kernels -might not have a recent enough version of ``glibc`` to support ``syncfs(2)``. +If you have kernel issues with SyncFS, try running one OSD per host to see if +performance improves. Old kernels might not have a recent enough version of +``glibc`` to support ``syncfs(2)``. + Filesystem Issues ----------------- -Currently, we recommend deploying clusters with the BlueStore back end. -When running a pre-Luminous release or if you have a specific reason to deploy -OSDs with the previous Filestore backend, we recommend ``XFS``. +In post-Luminous releases, we recommend deploying clusters with the BlueStore +back end. When running a pre-Luminous release, or if you have a specific +reason to deploy OSDs with the previous Filestore backend, we recommend +``XFS``. We recommend against using ``Btrfs`` or ``ext4``. The ``Btrfs`` filesystem has -many attractive features, but bugs may lead to -performance issues and spurious ENOSPC errors. We do not recommend -``ext4`` for Filestore OSDs because ``xattr`` limitations break support for long -object names, which are needed for RGW. +many attractive features, but bugs may lead to performance issues and spurious +ENOSPC errors. We do not recommend ``ext4`` for Filestore OSDs because +``xattr`` limitations break support for long object names, which are needed for +RGW. For more information, see `Filesystem Recommendations`_. @@ -442,31 +585,32 @@ For more information, see `Filesystem Recommendations`_. Insufficient RAM ---------------- -We recommend a *minimum* of 4GB of RAM per OSD daemon and suggest rounding up -from 6-8GB. You may notice that during normal operations, ``ceph-osd`` -processes only use a fraction of that amount. -Unused RAM makes it tempting to use the excess RAM for co-resident -applications or to skimp on each node's memory capacity. However, -when OSDs experience recovery their memory utilization spikes. If -there is insufficient RAM available, OSD performance will slow considerably -and the daemons may even crash or be killed by the Linux ``OOM Killer``. +We recommend a *minimum* of 4GB of RAM per OSD daemon and we suggest rounding +up from 6GB to 8GB. During normal operations, you may notice that ``ceph-osd`` +processes use only a fraction of that amount. You might be tempted to use the +excess RAM for co-resident applications or to skimp on each node's memory +capacity. However, when OSDs experience recovery their memory utilization +spikes. If there is insufficient RAM available during recovery, OSD performance +will slow considerably and the daemons may even crash or be killed by the Linux +``OOM Killer``. + Blocked Requests or Slow Requests --------------------------------- -If a ``ceph-osd`` daemon is slow to respond to a request, messages will be logged -noting ops that are taking too long. 
The warning threshold +When a ``ceph-osd`` daemon is slow to respond to a request, the cluster log +receives messages reporting ops that are taking too long. The warning threshold defaults to 30 seconds and is configurable via the ``osd_op_complaint_time`` -setting. When this happens, the cluster log will receive messages. +setting. Legacy versions of Ceph complain about ``old requests``:: - osd.0 192.168.106.220:6800/18813 312 : [WRN] old request osd_op(client.5099.0:790 fatty_26485_object789 [write 0~4096] 2.5e54f643) v4 received at 2012-03-06 15:42:56.054801 currently waiting for sub ops + osd.0 192.168.106.220:6800/18813 312 : [WRN] old request osd_op(client.5099.0:790 fatty_26485_object789 [write 0~4096] 2.5e54f643) v4 received at 2012-03-06 15:42:56.054801 currently waiting for sub ops -New versions of Ceph complain about ``slow requests``:: +Newer versions of Ceph complain about ``slow requests``:: - {date} {osd.num} [WRN] 1 slow requests, 1 included below; oldest blocked for > 30.005692 secs - {date} {osd.num} [WRN] slow request 30.005692 seconds old, received at {date-time}: osd_op(client.4240.0:8 benchmark_data_ceph-1_39426_object7 [write 0~4194304] 0.69848840) v4 currently waiting for subops from [610] + {date} {osd.num} [WRN] 1 slow requests, 1 included below; oldest blocked for > 30.005692 secs + {date} {osd.num} [WRN] slow request 30.005692 seconds old, received at {date-time}: osd_op(client.4240.0:8 benchmark_data_ceph-1_39426_object7 [write 0~4194304] 0.69848840) v4 currently waiting for subops from [610] Possible causes include: @@ -486,27 +630,27 @@ Possible solutions: Debugging Slow Requests ----------------------- -If you run ``ceph daemon osd. dump_historic_ops`` or ``ceph daemon osd. dump_ops_in_flight``, -you will see a set of operations and a list of events each operation went -through. These are briefly described below. +If you run ``ceph daemon osd. dump_historic_ops`` or ``ceph daemon osd. +dump_ops_in_flight``, you will see a set of operations and a list of events +each operation went through. These are briefly described below. Events from the Messenger layer: -- ``header_read``: When the messenger first started reading the message off the wire. -- ``throttled``: When the messenger tried to acquire memory throttle space to read +- ``header_read``: The time that the messenger first started reading the message off the wire. +- ``throttled``: The time that the messenger tried to acquire memory throttle space to read the message into memory. -- ``all_read``: When the messenger finished reading the message off the wire. -- ``dispatched``: When the messenger gave the message to the OSD. +- ``all_read``: The time that the messenger finished reading the message off the wire. +- ``dispatched``: The time that the messenger gave the message to the OSD. - ``initiated``: This is identical to ``header_read``. The existence of both is a historical oddity. Events from the OSD as it processes ops: - ``queued_for_pg``: The op has been put into the queue for processing by its PG. -- ``reached_pg``: The PG has started doing the op. -- ``waiting for \*``: The op is waiting for some other work to complete before it - can proceed (e.g. a new OSDMap; for its object target to scrub; for the PG to - finish peering; all as specified in the message). +- ``reached_pg``: The PG has started performing the op. 
+- ``waiting for \*``: The op is waiting for some other work to complete before + it can proceed (for example, a new OSDMap; the scrubbing of its object + target; the completion of a PG's peering; all as specified in the message). - ``started``: The op has been accepted as something the OSD should do and is now being performed. - ``waiting for subops from``: The op has been sent to replica OSDs. @@ -514,95 +658,115 @@ Events from the OSD as it processes ops: Events from ```Filestore```: - ``commit_queued_for_journal_write``: The op has been given to the FileStore. -- ``write_thread_in_journal_buffer``: The op is in the journal's buffer and waiting +- ``write_thread_in_journal_buffer``: The op is in the journal's buffer and is waiting to be persisted (as the next disk write). - ``journaled_completion_queued``: The op was journaled to disk and its callback - queued for invocation. + has been queued for invocation. Events from the OSD after data has been given to underlying storage: -- ``op_commit``: The op has been committed (i.e. written to journal) by the +- ``op_commit``: The op has been committed (that is, written to journal) by the primary OSD. -- ``op_applied``: The op has been `write()'en `_ to the backing FS (i.e. applied in memory but not flushed out to disk) on the primary. +- ``op_applied``: The op has been `write()'en + `_ to the backing FS (that is, + applied in memory but not flushed out to disk) on the primary. - ``sub_op_applied``: ``op_applied``, but for a replica's "subop". - ``sub_op_committed``: ``op_commit``, but for a replica's subop (only for EC pools). - ``sub_op_commit_rec/sub_op_apply_rec from ``: The primary marks this when it hears about the above, but for a particular replica (i.e. ````). - ``commit_sent``: We sent a reply back to the client (or primary OSD, for sub ops). -Many of these events are seemingly redundant, but cross important boundaries in -the internal code (such as passing data across locks into new threads). +Some of these events may appear redundant, but they cross important boundaries +in the internal code (such as passing data across locks into new threads). + Flapping OSDs ============= -When OSDs peer and check heartbeats, they use the cluster (back-end) -network when it's available. See `Monitor/OSD Interaction`_ for details. +"Flapping" is the term for the phenomenon of an OSD being repeatedly marked +``up`` and then ``down`` in rapid succession. This section explains how to +recognize flapping, and how to mitigate it. + +When OSDs peer and check heartbeats, they use the cluster (back-end) network +when it is available. See `Monitor/OSD Interaction`_ for details. -We have traditionally recommended separate *public* (front-end) and *private* -(cluster / back-end / replication) networks: +The upstream Ceph community has traditionally recommended separate *public* +(front-end) and *private* (cluster / back-end / replication) networks. This +provides the following benefits: -#. Segregation of heartbeat and replication / recovery traffic (private) - from client and OSD <-> mon traffic (public). This helps keep one - from DoS-ing the other, which could in turn result in a cascading failure. +#. Segregation of (1) heartbeat traffic and replication/recovery traffic + (private) from (2) traffic from clients and between OSDs and monitors + (public). This helps keep one stream of traffic from DoS-ing the other, + which could in turn result in a cascading failure. #. Additional throughput for both public and private traffic. 
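+
+Before troubleshooting flapping further, it can help to confirm which public
+and cluster networks the cluster is actually configured to use. As a sketch
+(this assumes the options were set in the cluster configuration database with
+``ceph config set`` rather than only in a local ``ceph.conf``), you might run:
+
+.. prompt:: bash
+
+   ceph config get mon public_network
+   ceph config get osd cluster_network
+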
-When common networking technologies were 100Mb/s and 1Gb/s, this separation -was often critical. With today's 10Gb/s, 40Gb/s, and 25/50/100Gb/s -networks, the above capacity concerns are often diminished or even obviated. -For example, if your OSD nodes have two network ports, dedicating one to -the public and the other to the private network means no path redundancy. -This degrades your ability to weather network maintenance and failures without -significant cluster or client impact. Consider instead using both links -for just a public network: with bonding (LACP) or equal-cost routing (e.g. FRR) -you reap the benefits of increased throughput headroom, fault tolerance, and -reduced OSD flapping. +In the past, when common networking technologies were measured in a range +encompassing 100Mb/s and 1Gb/s, this separation was often critical. But with +today's 10Gb/s, 40Gb/s, and 25/50/100Gb/s networks, the above capacity concerns +are often diminished or even obviated. For example, if your OSD nodes have two +network ports, dedicating one to the public and the other to the private +network means that you have no path redundancy. This degrades your ability to +endure network maintenance and network failures without significant cluster or +client impact. In situations like this, consider instead using both links for +only a public network: with bonding (LACP) or equal-cost routing (for example, +FRR) you reap the benefits of increased throughput headroom, fault tolerance, +and reduced OSD flapping. When a private network (or even a single host link) fails or degrades while the -public network operates normally, OSDs may not handle this situation well. What -happens is that OSDs use the public network to report each other ``down`` to -the monitors, while marking themselves ``up``. The monitors then send out, -again on the public network, an updated cluster map with affected OSDs marked -`down`. These OSDs reply to the monitors "I'm not dead yet!", and the cycle -repeats. We call this scenario 'flapping`, and it can be difficult to isolate -and remediate. With no private network, this irksome dynamic is avoided: -OSDs are generally either ``up`` or ``down`` without flapping. - -If something does cause OSDs to 'flap' (repeatedly getting marked ``down`` and +public network continues operating normally, OSDs may not handle this situation +well. In such situations, OSDs use the public network to report each other +``down`` to the monitors, while marking themselves ``up``. The monitors then +send out-- again on the public network--an updated cluster map with the +affected OSDs marked `down`. These OSDs reply to the monitors "I'm not dead +yet!", and the cycle repeats. We call this scenario 'flapping`, and it can be +difficult to isolate and remediate. Without a private network, this irksome +dynamic is avoided: OSDs are generally either ``up`` or ``down`` without +flapping. + +If something does cause OSDs to 'flap' (repeatedly being marked ``down`` and then ``up`` again), you can force the monitors to halt the flapping by -temporarily freezing their states:: +temporarily freezing their states: - ceph osd set noup # prevent OSDs from getting marked up - ceph osd set nodown # prevent OSDs from getting marked down +.. 
prompt:: bash -These flags are recorded in the osdmap:: + ceph osd set noup # prevent OSDs from getting marked up + ceph osd set nodown # prevent OSDs from getting marked down - ceph osd dump | grep flags - flags no-up,no-down +These flags are recorded in the osdmap: -You can clear the flags with:: +.. prompt:: bash - ceph osd unset noup - ceph osd unset nodown + ceph osd dump | grep flags -Two other flags are supported, ``noin`` and ``noout``, which prevent -booting OSDs from being marked ``in`` (allocated data) or protect OSDs -from eventually being marked ``out`` (regardless of what the current value for -``mon_osd_down_out_interval`` is). +:: + + flags no-up,no-down + +You can clear these flags with: + +.. prompt:: bash -.. note:: ``noup``, ``noout``, and ``nodown`` are temporary in the - sense that once the flags are cleared, the action they were blocking - should occur shortly after. The ``noin`` flag, on the other hand, - prevents OSDs from being marked ``in`` on boot, and any daemons that - started while the flag was set will remain that way. + ceph osd unset noup + ceph osd unset nodown -.. note:: The causes and effects of flapping can be somewhat mitigated through - careful adjustments to the ``mon_osd_down_out_subtree_limit``, +Two other flags are available, ``noin`` and ``noout``, which prevent booting +OSDs from being marked ``in`` (allocated data) or protect OSDs from eventually +being marked ``out`` (regardless of the current value of +``mon_osd_down_out_interval``). + +.. note:: ``noup``, ``noout``, and ``nodown`` are temporary in the sense that + after the flags are cleared, the action that they were blocking should be + possible shortly thereafter. But the ``noin`` flag prevents OSDs from being + marked ``in`` on boot, and any daemons that started while the flag was set + will remain that way. + +.. note:: The causes and effects of flapping can be mitigated somewhat by + making careful adjustments to ``mon_osd_down_out_subtree_limit``, ``mon_osd_reporter_subtree_level``, and ``mon_osd_min_down_reporters``. Derivation of optimal settings depends on cluster size, topology, and the - Ceph release in use. Their interactions are subtle and beyond the scope of - this document. + Ceph release in use. The interaction of all of these factors is subtle and + is beyond the scope of this document. .. _iostat: https://en.wikipedia.org/wiki/Iostat @@ -612,6 +776,9 @@ from eventually being marked ``out`` (regardless of what the current value for .. _Monitor/OSD Interaction: ../../configuration/mon-osd-interaction .. _Monitor Config Reference: ../../configuration/mon-config-ref .. _monitoring your OSDs: ../../operations/monitoring-osd-pg + +.. _monitoring OSDs: ../../operations/monitoring-osd-pg/#monitoring-osds + .. _subscribe to the ceph-devel email list: mailto:majordomo@vger.kernel.org?body=subscribe+ceph-devel .. _unsubscribe from the ceph-devel email list: mailto:majordomo@vger.kernel.org?body=unsubscribe+ceph-devel .. 
_subscribe to the ceph-users email list: mailto:ceph-users-join@lists.ceph.com diff --git a/ceph/doc/rados/troubleshooting/troubleshooting-pg.rst b/ceph/doc/rados/troubleshooting/troubleshooting-pg.rst index b7ca679ae..74d04bd9f 100644 --- a/ceph/doc/rados/troubleshooting/troubleshooting-pg.rst +++ b/ceph/doc/rados/troubleshooting/troubleshooting-pg.rst @@ -1,120 +1,128 @@ -===================== +==================== Troubleshooting PGs -===================== +==================== Placement Groups Never Get Clean ================================ -When you create a cluster and your cluster remains in ``active``, -``active+remapped`` or ``active+degraded`` status and never achieves an -``active+clean`` status, you likely have a problem with your configuration. +If, after you have created your cluster, any Placement Groups (PGs) remain in +the ``active`` status, the ``active+remapped`` status or the +``active+degraded`` status and never achieves an ``active+clean`` status, you +likely have a problem with your configuration. -You may need to review settings in the `Pool, PG and CRUSH Config Reference`_ -and make appropriate adjustments. +In such a situation, it may be necessary to review the settings in the `Pool, +PG and CRUSH Config Reference`_ and make appropriate adjustments. -As a general rule, you should run your cluster with more than one OSD and a -pool size greater than 1 object replica. +As a general rule, run your cluster with more than one OSD and a pool size +greater than two object replicas. .. _one-node-cluster: One Node Cluster ---------------- -Ceph no longer provides documentation for operating on a single node, because -you would never deploy a system designed for distributed computing on a single -node. Additionally, mounting client kernel modules on a single node containing a -Ceph daemon may cause a deadlock due to issues with the Linux kernel itself -(unless you use VMs for the clients). You can experiment with Ceph in a 1-node +Ceph no longer provides documentation for operating on a single node. Systems +designed for distributed computing by definition do not run on a single node. +The mounting of client kernel modules on a single node that contains a Ceph +daemon may cause a deadlock due to issues with the Linux kernel itself (unless +VMs are used as clients). You can experiment with Ceph in a one-node configuration, in spite of the limitations as described herein. -If you are trying to create a cluster on a single node, you must change the -default of the ``osd_crush_chooseleaf_type`` setting from ``1`` (meaning +To create a cluster on a single node, you must change the +``osd_crush_chooseleaf_type`` setting from the default of ``1`` (meaning ``host`` or ``node``) to ``0`` (meaning ``osd``) in your Ceph configuration -file before you create your monitors and OSDs. This tells Ceph that an OSD -can peer with another OSD on the same host. If you are trying to set up a -1-node cluster and ``osd_crush_chooseleaf_type`` is greater than ``0``, -Ceph will try to peer the PGs of one OSD with the PGs of another OSD on -another node, chassis, rack, row, or even datacenter depending on the setting. +file before you create your monitors and OSDs. This tells Ceph that an OSD is +permitted to place another OSD on the same host. 
If you are trying to set up a
+single-node cluster and ``osd_crush_chooseleaf_type`` is greater than ``0``,
+Ceph will attempt to place the PGs of one OSD with the PGs of another OSD on
+another node, chassis, rack, row, or datacenter depending on the setting.
 
-.. tip:: DO NOT mount kernel clients directly on the same node as your
-   Ceph Storage Cluster, because kernel conflicts can arise. However, you
-   can mount kernel clients within virtual machines (VMs) on a single node.
+.. tip:: DO NOT mount kernel clients directly on the same node as your Ceph
+   Storage Cluster. Kernel conflicts can arise. However, you can mount kernel
+   clients within virtual machines (VMs) on a single node.
 
-If you are creating OSDs using a single disk, you must create directories
-for the data manually first.
+If you are creating OSDs using a single disk, you must manually create
+directories for the data first.
 
 
 Fewer OSDs than Replicas
 ------------------------
 
-If you have brought up two OSDs to an ``up`` and ``in`` state, but you still
-don't see ``active + clean`` placement groups, you may have an
-``osd_pool_default_size`` set to greater than ``2``.
+If two OSDs are in an ``up`` and ``in`` state, but the placement groups are not
+in an ``active + clean`` state, you may have an ``osd_pool_default_size`` set
+to greater than ``2``.
 
 There are a few ways to address this situation. If you want to operate your
 cluster in an ``active + degraded`` state with two replicas, you can set the
-``osd_pool_default_min_size`` to ``2`` so that you can write objects in
-an ``active + degraded`` state. You may also set the ``osd_pool_default_size``
-setting to ``2`` so that you only have two stored replicas (the original and
-one replica), in which case the cluster should achieve an ``active + clean``
+``osd_pool_default_min_size`` to ``2`` so that you can write objects in an
+``active + degraded`` state. You may also set the ``osd_pool_default_size``
+setting to ``2`` so that you have only two stored replicas (the original and
+one replica). In such a case, the cluster should achieve an ``active + clean``
 state.
 
-.. note:: You can make the changes at runtime. If you make the changes in
-   your Ceph configuration file, you may need to restart your cluster.
+.. note:: You can make the changes while the cluster is running. If you make
+   the changes in your Ceph configuration file, you might need to restart your
+   cluster.
 
 
 Pool Size = 1
 -------------
 
-If you have the ``osd_pool_default_size`` set to ``1``, you will only have
-one copy of the object. OSDs rely on other OSDs to tell them which objects
-they should have. If a first OSD has a copy of an object and there is no
-second copy, then no second OSD can tell the first OSD that it should have
-that copy. For each placement group mapped to the first OSD (see
-``ceph pg dump``), you can force the first OSD to notice the placement groups
-it needs by running::
+If you have ``osd_pool_default_size`` set to ``1``, you will have only one copy
+of the object. OSDs rely on other OSDs to tell them which objects they should
+have. If one OSD has a copy of an object and there is no second copy, then
+there is no second OSD to tell the first OSD that it should have that copy. For
+each placement group mapped to the first OSD (see ``ceph pg dump``), you can
+force the first OSD to notice the placement groups it needs by running a
+command of the following form:
 
- ceph osd force-create-pg 
+.. 
prompt:: bash + + ceph osd force-create-pg CRUSH Map Errors ---------------- -Another candidate for placement groups remaining unclean involves errors +If any placement groups in your cluster are unclean, then there might be errors in your CRUSH map. Stuck Placement Groups ====================== -It is normal for placement groups to enter states like "degraded" or "peering" -following a failure. Normally these states indicate the normal progression -through the failure recovery process. However, if a placement group stays in one -of these states for a long time this may be an indication of a larger problem. -For this reason, the monitor will warn when placement groups get "stuck" in a -non-optimal state. Specifically, we check for: +It is normal for placement groups to enter "degraded" or "peering" states after +a component failure. Normally, these states reflect the expected progression +through the failure recovery process. However, a placement group that stays in +one of these states for a long time might be an indication of a larger problem. +For this reason, the Ceph Monitors will warn when placement groups get "stuck" +in a non-optimal state. Specifically, we check for: + +* ``inactive`` - The placement group has not been ``active`` for too long (that + is, it hasn't been able to service read/write requests). -* ``inactive`` - The placement group has not been ``active`` for too long - (i.e., it hasn't been able to service read/write requests). +* ``unclean`` - The placement group has not been ``clean`` for too long (that + is, it hasn't been able to completely recover from a previous failure). -* ``unclean`` - The placement group has not been ``clean`` for too long - (i.e., it hasn't been able to completely recover from a previous failure). +* ``stale`` - The placement group status has not been updated by a + ``ceph-osd``. This indicates that all nodes storing this placement group may + be ``down``. -* ``stale`` - The placement group status has not been updated by a ``ceph-osd``, - indicating that all nodes storing this placement group may be ``down``. +List stuck placement groups by running one of the following commands: -You can explicitly list stuck placement groups with one of:: +.. prompt:: bash - ceph pg dump_stuck stale - ceph pg dump_stuck inactive - ceph pg dump_stuck unclean + ceph pg dump_stuck stale + ceph pg dump_stuck inactive + ceph pg dump_stuck unclean -For stuck ``stale`` placement groups, it is normally a matter of getting the -right ``ceph-osd`` daemons running again. For stuck ``inactive`` placement -groups, it is usually a peering problem (see :ref:`failures-osd-peering`). For -stuck ``unclean`` placement groups, there is usually something preventing -recovery from completing, like unfound objects (see -:ref:`failures-osd-unfound`); +- Stuck ``stale`` placement groups usually indicate that key ``ceph-osd`` + daemons are not running. +- Stuck ``inactive`` placement groups usually indicate a peering problem (see + :ref:`failures-osd-peering`). +- Stuck ``unclean`` placement groups usually indicate that something is + preventing recovery from completing, possibly unfound objects (see + :ref:`failures-osd-unfound`); @@ -123,21 +131,28 @@ recovery from completing, like unfound objects (see Placement Group Down - Peering Failure ====================================== -In certain cases, the ``ceph-osd`` `Peering` process can run into -problems, preventing a PG from becoming active and usable. 
For -example, ``ceph health`` might report:: +In certain cases, the ``ceph-osd`` `peering` process can run into problems, +which can prevent a PG from becoming active and usable. In such a case, running +the command ``ceph health detail`` will report something similar to the following: + +.. prompt:: bash + + ceph health detail - ceph health detail - HEALTH_ERR 7 pgs degraded; 12 pgs down; 12 pgs peering; 1 pgs recovering; 6 pgs stuck unclean; 114/3300 degraded (3.455%); 1/3 in osds are down - ... - pg 0.5 is down+peering - pg 1.4 is down+peering - ... - osd.1 is down since epoch 69, last address 192.168.106.220:6801/8651 +:: + + HEALTH_ERR 7 pgs degraded; 12 pgs down; 12 pgs peering; 1 pgs recovering; 6 pgs stuck unclean; 114/3300 degraded (3.455%); 1/3 in osds are down + ... + pg 0.5 is down+peering + pg 1.4 is down+peering + ... + osd.1 is down since epoch 69, last address 192.168.106.220:6801/8651 -We can query the cluster to determine exactly why the PG is marked ``down`` with:: +Query the cluster to determine exactly why the PG is marked ``down`` by running a command of the following form: - ceph pg 0.5 query +.. prompt:: bash + + ceph pg 0.5 query .. code-block:: javascript @@ -164,21 +179,24 @@ We can query the cluster to determine exactly why the PG is marked ``down`` with ] } -The ``recovery_state`` section tells us that peering is blocked due to -down ``ceph-osd`` daemons, specifically ``osd.1``. In this case, we can start that ``ceph-osd`` -and things will recover. +The ``recovery_state`` section tells us that peering is blocked due to down +``ceph-osd`` daemons, specifically ``osd.1``. In this case, we can start that +particular ``ceph-osd`` and recovery will proceed. + +Alternatively, if there is a catastrophic failure of ``osd.1`` (for example, if +there has been a disk failure), the cluster can be informed that the OSD is +``lost`` and the cluster can be instructed that it must cope as best it can. -Alternatively, if there is a catastrophic failure of ``osd.1`` (e.g., disk -failure), we can tell the cluster that it is ``lost`` and to cope as -best it can. +.. important:: Informing the cluster that an OSD has been lost is dangerous + because the cluster cannot guarantee that the other copies of the data are + consistent and up to date. -.. important:: This is dangerous in that the cluster cannot - guarantee that the other copies of the data are consistent - and up to date. +To report an OSD ``lost`` and to instruct Ceph to continue to attempt recovery +anyway, run a command of the following form: -To instruct Ceph to continue anyway:: +.. prompt:: bash - ceph osd lost 1 + ceph osd lost 1 Recovery will proceed. @@ -188,32 +206,43 @@ Recovery will proceed. Unfound Objects =============== -Under certain combinations of failures Ceph may complain about -``unfound`` objects:: +Under certain combinations of failures, Ceph may complain about ``unfound`` +objects, as in this example: + +.. prompt:: bash - ceph health detail - HEALTH_WARN 1 pgs degraded; 78/3778 unfound (2.065%) - pg 2.4 is active+degraded, 78 unfound + ceph health detail -This means that the storage cluster knows that some objects (or newer -copies of existing objects) exist, but it hasn't found copies of them. 
-One example of how this might come about for a PG whose data is on ceph-osds -1 and 2: +:: + + HEALTH_WARN 1 pgs degraded; 78/3778 unfound (2.065%) + pg 2.4 is active+degraded, 78 unfound + +This means that the storage cluster knows that some objects (or newer copies of +existing objects) exist, but it hasn't found copies of them. Here is an +example of how this might come about for a PG whose data is on two OSDS, which +we will call "1" and "2": * 1 goes down * 2 handles some writes, alone * 1 comes up -* 1 and 2 repeer, and the objects missing on 1 are queued for recovery. +* 1 and 2 re-peer, and the objects missing on 1 are queued for recovery. * Before the new objects are copied, 2 goes down. -Now 1 knows that these object exist, but there is no live ``ceph-osd`` who -has a copy. In this case, IO to those objects will block, and the -cluster will hope that the failed node comes back soon; this is -assumed to be preferable to returning an IO error to the user. +At this point, 1 knows that these objects exist, but there is no live +``ceph-osd`` that has a copy of the objects. In this case, IO to those objects +will block, and the cluster will hope that the failed node comes back soon. +This is assumed to be preferable to returning an IO error to the user. + +.. note:: The situation described immediately above is one reason that setting + ``size=2`` on a replicated pool and ``m=1`` on an erasure coded pool risks + data loss. -First, you can identify which objects are unfound with:: +Identify which objects are unfound by running a command of the following form: - ceph pg 2.4 list_unfound [starting offset, in json] +.. prompt:: bash + + ceph pg 2.4 list_unfound [starting offset, in json] .. code-block:: javascript @@ -252,22 +281,24 @@ First, you can identify which objects are unfound with:: "more": false } -If there are too many objects to list in a single result, the ``more`` -field will be true and you can query for more. (Eventually the -command line tool will hide this from you, but not yet.) +If there are too many objects to list in a single result, the ``more`` field +will be true and you can query for more. (Eventually the command line tool +will hide this from you, but not yet.) + +Now you can identify which OSDs have been probed or might contain data. -Second, you can identify which OSDs have been probed or might contain -data. +At the end of the listing (before ``more: false``), ``might_have_unfound`` is +provided when ``available_might_have_unfound`` is true. This is equivalent to +the output of ``ceph pg #.# query``. This eliminates the need to use ``query`` +directly. The ``might_have_unfound`` information given behaves the same way as +that ``query`` does, which is described below. The only difference is that +OSDs that have the status of ``already probed`` are ignored. -At the end of the listing (before ``more`` is false), ``might_have_unfound`` is provided -when ``available_might_have_unfound`` is true. This is equivalent to the output -of ``ceph pg #.# query``. This eliminates the need to use ``query`` directly. -The ``might_have_unfound`` information given behaves the same way as described below for ``query``. -The only difference is that OSDs that have ``already probed`` status are ignored. +Use of ``query``: -Use of ``query``:: +.. prompt:: bash - ceph pg 2.4 query + ceph pg 2.4 query .. 
code-block:: javascript @@ -278,8 +309,8 @@ Use of ``query``:: { "osd": 1, "status": "osd is down"}]}, -In this case, for example, the cluster knows that ``osd.1`` might have -data, but it is ``down``. The full range of possible states include: +In this case, the cluster knows that ``osd.1`` might have data, but it is +``down``. Here is the full range of possible states: * already probed * querying @@ -289,106 +320,135 @@ data, but it is ``down``. The full range of possible states include: Sometimes it simply takes some time for the cluster to query possible locations. -It is possible that there are other locations where the object can -exist that are not listed. For example, if a ceph-osd is stopped and -taken out of the cluster, the cluster fully recovers, and due to some -future set of failures ends up with an unfound object, it won't -consider the long-departed ceph-osd as a potential location to -consider. (This scenario, however, is unlikely.) +It is possible that there are other locations where the object might exist that +are not listed. For example: if an OSD is stopped and taken out of the cluster +and then the cluster fully recovers, and then through a subsequent set of +failures the cluster ends up with an unfound object, the cluster will ignore +the removed OSD. (This scenario, however, is unlikely.) -If all possible locations have been queried and objects are still -lost, you may have to give up on the lost objects. This, again, is -possible given unusual combinations of failures that allow the cluster -to learn about writes that were performed before the writes themselves -are recovered. To mark the "unfound" objects as "lost":: +If all possible locations have been queried and objects are still lost, you may +have to give up on the lost objects. This, again, is possible only when unusual +combinations of failures have occurred that allow the cluster to learn about +writes that were performed before the writes themselves have been recovered. To +mark the "unfound" objects as "lost", run a command of the following form: - ceph pg 2.5 mark_unfound_lost revert|delete +.. prompt:: bash -This the final argument specifies how the cluster should deal with -lost objects. + ceph pg 2.5 mark_unfound_lost revert|delete -The "delete" option will forget about them entirely. +Here the final argument (``revert|delete``) specifies how the cluster should +deal with lost objects. -The "revert" option (not available for erasure coded pools) will -either roll back to a previous version of the object or (if it was a -new object) forget about it entirely. Use this with caution, as it -may confuse applications that expected the object to exist. +The ``delete`` option will cause the cluster to forget about them entirely. +The ``revert`` option (which is not available for erasure coded pools) will +either roll back to a previous version of the object or (if it was a new +object) forget about the object entirely. Use ``revert`` with caution, as it +may confuse applications that expect the object to exist. Homeless Placement Groups ========================= -It is possible for all OSDs that had copies of a given placement groups to fail. -If that's the case, that subset of the object store is unavailable, and the -monitor will receive no status updates for those placement groups. To detect -this situation, the monitor marks any placement group whose primary OSD has -failed as ``stale``. For example:: +It is possible that every OSD that has copies of a given placement group fails. 
+If this happens, then the subset of the object store that contains those +placement groups becomes unavailable and the monitor will receive no status +updates for those placement groups. The monitor marks as ``stale`` any +placement group whose primary OSD has failed. For example: + +.. prompt:: bash + + ceph health - ceph health - HEALTH_WARN 24 pgs stale; 3/300 in osds are down +:: -You can identify which placement groups are ``stale``, and what the last OSDs to -store them were, with:: + HEALTH_WARN 24 pgs stale; 3/300 in osds are down - ceph health detail - HEALTH_WARN 24 pgs stale; 3/300 in osds are down - ... - pg 2.5 is stuck stale+active+remapped, last acting [2,0] - ... - osd.10 is down since epoch 23, last address 192.168.106.220:6800/11080 - osd.11 is down since epoch 13, last address 192.168.106.220:6803/11539 - osd.12 is down since epoch 24, last address 192.168.106.220:6806/11861 +Identify which placement groups are ``stale`` and which were the last OSDs to +store the ``stale`` placement groups by running the following command: -If we want to get placement group 2.5 back online, for example, this tells us that -it was last managed by ``osd.0`` and ``osd.2``. Restarting those ``ceph-osd`` -daemons will allow the cluster to recover that placement group (and, presumably, -many others). +.. prompt:: bash + + ceph health detail + +:: + + HEALTH_WARN 24 pgs stale; 3/300 in osds are down + ... + pg 2.5 is stuck stale+active+remapped, last acting [2,0] + ... + osd.10 is down since epoch 23, last address 192.168.106.220:6800/11080 + osd.11 is down since epoch 13, last address 192.168.106.220:6803/11539 + osd.12 is down since epoch 24, last address 192.168.106.220:6806/11861 + +This output indicates that placement group 2.5 (``pg 2.5``) was last managed by +``osd.0`` and ``osd.2``. Restart those OSDs to allow the cluster to recover +that placement group. Only a Few OSDs Receive Data ============================ -If you have many nodes in your cluster and only a few of them receive data, -`check`_ the number of placement groups in your pool. Since placement groups get -mapped to OSDs, a small number of placement groups will not distribute across -your cluster. Try creating a pool with a placement group count that is a -multiple of the number of OSDs. See `Placement Groups`_ for details. The default -placement group count for pools is not useful, but you can change it `here`_. +If only a few of the nodes in the cluster are receiving data, check the number +of placement groups in the pool as instructed in the :ref:`Placement Groups +` documentation. Since placement groups get mapped to +OSDs in an operation involving dividing the number of placement groups in the +cluster by the number of OSDs in the cluster, a small number of placement +groups (the remainder, in this operation) are sometimes not distributed across +the cluster. In situations like this, create a pool with a placement group +count that is a multiple of the number of OSDs. See `Placement Groups`_ for +details. See the :ref:`Pool, PG, and CRUSH Config Reference +` for instructions on changing the default +values used to determine how many placement groups are assigned to each pool. Can't Write Data ================ -If your cluster is up, but some OSDs are down and you cannot write data, -check to ensure that you have the minimum number of OSDs running for the -placement group. 
If you don't have the minimum number of OSDs running, -Ceph will not allow you to write data because there is no guarantee -that Ceph can replicate your data. See ``osd_pool_default_min_size`` -in the `Pool, PG and CRUSH Config Reference`_ for details. +If the cluster is up, but some OSDs are down and you cannot write data, make +sure that you have the minimum number of OSDs running in the pool. If you don't +have the minimum number of OSDs running in the pool, Ceph will not allow you to +write data to it because there is no guarantee that Ceph can replicate your +data. See ``osd_pool_default_min_size`` in the :ref:`Pool, PG, and CRUSH +Config Reference ` for details. PGs Inconsistent ================ -If you receive an ``active + clean + inconsistent`` state, this may happen -due to an error during scrubbing. As always, we can identify the inconsistent -placement group(s) with:: +If the command ``ceph health detail`` returns an ``active + clean + +inconsistent`` state, this might indicate an error during scrubbing. Identify +the inconsistent placement group or placement groups by running the following +command: + +.. prompt:: bash $ ceph health detail + +:: + HEALTH_ERR 1 pgs inconsistent; 2 scrub errors pg 0.6 is active+clean+inconsistent, acting [0,1,2] 2 scrub errors -Or if you prefer inspecting the output in a programmatic way:: +Alternatively, run this command if you prefer to inspect the output in a +programmatic way: + +.. prompt:: bash + + $ rados list-inconsistent-pg rbd + +:: - $ rados list-inconsistent-pg rbd ["0.6"] There is only one consistent state, but in the worst case, we could have different inconsistencies in multiple perspectives found in more than one -objects. If an object named ``foo`` in PG ``0.6`` is truncated, we will have:: +object. If an object named ``foo`` in PG ``0.6`` is truncated, the output of +``rados list-inconsistent-pg rbd`` will look something like this: + +.. prompt:: bash - $ rados list-inconsistent-obj 0.6 --format=json-pretty + rados list-inconsistent-obj 0.6 --format=json-pretty .. code-block:: javascript @@ -442,82 +502,103 @@ objects. If an object named ``foo`` in PG ``0.6`` is truncated, we will have:: ] } -In this case, we can learn from the output: +In this case, the output indicates the following: -* The only inconsistent object is named ``foo``, and it is its head that has +* The only inconsistent object is named ``foo``, and its head has inconsistencies. * The inconsistencies fall into two categories: - * ``errors``: these errors indicate inconsistencies between shards without a - determination of which shard(s) are bad. Check for the ``errors`` in the - `shards` array, if available, to pinpoint the problem. - - * ``data_digest_mismatch``: the digest of the replica read from OSD.2 is - different from the ones of OSD.0 and OSD.1 - * ``size_mismatch``: the size of the replica read from OSD.2 is 0, while - the size reported by OSD.0 and OSD.1 is 968. - * ``union_shard_errors``: the union of all shard specific ``errors`` in - ``shards`` array. The ``errors`` are set for the given shard that has the - problem. They include errors like ``read_error``. The ``errors`` ending in - ``oi`` indicate a comparison with ``selected_object_info``. Look at the - ``shards`` array to determine which shard has which error(s). 
-
- * ``data_digest_mismatch_info``: the digest stored in the object-info is not
- ``0xffffffff``, which is calculated from the shard read from OSD.2
- * ``size_mismatch_info``: the size stored in the object-info is different
- from the one read from OSD.2. The latter is 0.
-
-You can repair the inconsistent placement group by executing::
+ #. ``errors``: these errors indicate inconsistencies between shards, without
+ an indication of which shard(s) are bad. Check for the ``errors`` in the
+ ``shards`` array, if available, to pinpoint the problem.
+
+ * ``data_digest_mismatch``: the digest of the replica read from ``OSD.2``
+ is different from the digests of the replica reads of ``OSD.0`` and
+ ``OSD.1``
+ * ``size_mismatch``: the size of the replica read from ``OSD.2`` is ``0``,
+ but the size reported by ``OSD.0`` and ``OSD.1`` is ``968``.
+
+ #. ``union_shard_errors``: the union of all shard-specific ``errors`` in the
+ ``shards`` array. The ``errors`` are set for the shard with the problem.
+ These errors include ``read_error`` and other similar errors. The
+ ``errors`` ending in ``oi`` indicate a comparison with
+ ``selected_object_info``. Examine the ``shards`` array to determine
+ which shard has which error or errors.
+
+ * ``data_digest_mismatch_info``: the digest stored in the ``object-info``
+ is not ``0xffffffff``, which is calculated from the shard read from
+ ``OSD.2``
+ * ``size_mismatch_info``: the size stored in the ``object-info`` is
+ different from the size read from ``OSD.2``. The latter is ``0``.
+
+.. warning:: If ``read_error`` is listed in a shard's ``errors`` attribute, the
+ inconsistency is likely due to physical storage errors. In cases like this,
+ check the storage used by that OSD.
+
+ Examine the output of ``dmesg`` and ``smartctl`` before attempting a drive
+ repair.
+
+To repair the inconsistent placement group, run a command of the following
+form:
+
+.. prompt:: bash
+
+ ceph pg repair {placement-group-ID}
+
+.. warning:: This command overwrites the "bad" copies with "authoritative"
+ copies. In most cases, Ceph is able to choose authoritative copies from all
+ the available replicas by using some predefined criteria. This, however,
+ does not work in every case. For example, it might be the case that the
+ stored data digest is missing, which means that the calculated digest is
+ ignored when Ceph chooses the authoritative copies. Be aware of this, and
+ use the above command with caution.
- ceph pg repair {placement-group-ID}
-
-Which overwrites the `bad` copies with the `authoritative` ones. In most cases,
-Ceph is able to choose authoritative copies from all available replicas using
-some predefined criteria. But this does not always work. For example, the stored
-data digest could be missing, and the calculated digest will be ignored when
-choosing the authoritative copies. So, please use the above command with caution.
-
-If ``read_error`` is listed in the ``errors`` attribute of a shard, the
-inconsistency is likely due to disk errors. You might want to check your disk
-used by that OSD.
If you receive ``active + clean + inconsistent`` states periodically due to
-clock skew, you may consider configuring your `NTP`_ daemons on your
-monitor hosts to act as peers. See `The Network Time Protocol`_ and Ceph
-`Clock Settings`_ for additional details.
+clock skew, consider configuring the `NTP
+`_ daemons on your monitor
+hosts to act as peers. See `The Network Time Protocol `_
+and Ceph :ref:`Clock Settings ` for more information.
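+
+As a supplement to the ``read_error`` guidance above, the following sketch
+shows one way to locate and check the drive behind a suspect OSD (``osd.2``
+and the device path are illustrative; substitute the values from your own
+cluster):
+
+.. prompt:: bash
+
+   ceph device ls-by-daemon osd.2     # map the OSD to its backing device
+   sudo smartctl -a /dev/sdX          # substitute the device reported above
+   sudo dmesg | grep -i sdX           # look for kernel-level I/O errors
+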
Erasure Coded PGs are not active+clean ====================================== -When CRUSH fails to find enough OSDs to map to a PG, it will show as a -``2147483647`` which is ITEM_NONE or ``no OSD found``. For instance:: +If CRUSH fails to find enough OSDs to map to a PG, it will show as a +``2147483647`` which is ``ITEM_NONE`` or ``no OSD found``. For example:: [2,1,6,0,5,8,2147483647,7,4] Not enough OSDs --------------- -If the Ceph cluster only has 8 OSDs and the erasure coded pool needs -9, that is what it will show. You can either create another erasure -coded pool that requires less OSDs:: +If the Ceph cluster has only eight OSDs and an erasure coded pool needs nine +OSDs, the cluster will show "Not enough OSDs". In this case, you either create +another erasure coded pool that requires fewer OSDs, by running commands of the +following form: + +.. prompt:: bash ceph osd erasure-code-profile set myprofile k=5 m=3 ceph osd pool create erasurepool erasure myprofile -or add a new OSDs and the PG will automatically use them. +or add new OSDs, and the PG will automatically use them. CRUSH constraints cannot be satisfied ------------------------------------- -If the cluster has enough OSDs, it is possible that the CRUSH rule -imposes constraints that cannot be satisfied. If there are 10 OSDs on -two hosts and the CRUSH rule requires that no two OSDs from the -same host are used in the same PG, the mapping may fail because only -two OSDs will be found. You can check the constraint by displaying ("dumping") -the rule:: +If the cluster has enough OSDs, it is possible that the CRUSH rule is imposing +constraints that cannot be satisfied. If there are ten OSDs on two hosts and +the CRUSH rule requires that no two OSDs from the same host are used in the +same PG, the mapping may fail because only two OSDs will be found. Check the +constraint by displaying ("dumping") the rule, as shown here: + +.. prompt:: bash + + ceph osd crush rule ls + +:: - $ ceph osd crush rule ls [ "replicated_rule", "erasurepool"] @@ -535,36 +616,43 @@ the rule:: { "op": "emit"}]} -You can resolve the problem by creating a new pool in which PGs are allowed -to have OSDs residing on the same host with:: +Resolve this problem by creating a new pool in which PGs are allowed to have +OSDs residing on the same host by running the following commands: - ceph osd erasure-code-profile set myprofile crush-failure-domain=osd - ceph osd pool create erasurepool erasure myprofile +.. prompt:: bash + + ceph osd erasure-code-profile set myprofile crush-failure-domain=osd + ceph osd pool create erasurepool erasure myprofile CRUSH gives up too soon ----------------------- -If the Ceph cluster has just enough OSDs to map the PG (for instance a -cluster with a total of 9 OSDs and an erasure coded pool that requires -9 OSDs per PG), it is possible that CRUSH gives up before finding a -mapping. It can be resolved by: +If the Ceph cluster has just enough OSDs to map the PG (for instance a cluster +with a total of nine OSDs and an erasure coded pool that requires nine OSDs per +PG), it is possible that CRUSH gives up before finding a mapping. This problem +can be resolved by: -* lowering the erasure coded pool requirements to use less OSDs per PG - (that requires the creation of another pool as erasure code profiles - cannot be dynamically modified). +* lowering the erasure coded pool requirements to use fewer OSDs per PG (this + requires the creation of another pool, because erasure code profiles cannot + be modified dynamically). 
-* adding more OSDs to the cluster (that does not require the erasure - coded pool to be modified, it will become clean automatically) +* adding more OSDs to the cluster (this does not require the erasure coded pool + to be modified, because it will become clean automatically) -* use a handmade CRUSH rule that tries more times to find a good - mapping. This can be done by setting ``set_choose_tries`` to a value - greater than the default. +* using a handmade CRUSH rule that tries more times to find a good mapping. + This can be modified for an existing CRUSH rule by setting + ``set_choose_tries`` to a value greater than the default. -You should first verify the problem with ``crushtool`` after -extracting the crushmap from the cluster so your experiments do not -modify the Ceph cluster and only work on a local files:: +First, verify the problem by using ``crushtool`` after extracting the crushmap +from the cluster. This ensures that your experiments do not modify the Ceph +cluster and that they operate only on local files: + +.. prompt:: bash + + ceph osd crush rule dump erasurepool + +:: - $ ceph osd crush rule dump erasurepool { "rule_id": 1, "rule_name": "erasurepool", "type": 3, @@ -586,44 +674,54 @@ modify the Ceph cluster and only work on a local files:: bad mapping rule 8 x 79 num_rep 9 result [6,0,2,1,4,7,2147483647,5,8] bad mapping rule 8 x 173 num_rep 9 result [0,4,6,8,2,1,3,7,2147483647] -Where ``--num-rep`` is the number of OSDs the erasure code CRUSH -rule needs, ``--rule`` is the value of the ``rule_id`` field -displayed by ``ceph osd crush rule dump``. The test will try mapping -one million values (i.e. the range defined by ``[--min-x,--max-x]``) -and must display at least one bad mapping. If it outputs nothing it -means all mappings are successful and you can stop right there: the -problem is elsewhere. +Here, ``--num-rep`` is the number of OSDs that the erasure code CRUSH rule +needs, ``--rule`` is the value of the ``rule_id`` field that was displayed by +``ceph osd crush rule dump``. This test will attempt to map one million values +(in this example, the range defined by ``[--min-x,--max-x]``) and must display +at least one bad mapping. If this test outputs nothing, all mappings have been +successful and you can be assured that the problem with your cluster is not +caused by bad mappings. -The CRUSH rule can be edited by decompiling the crush map:: +Changing the value of set_choose_tries +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - $ crushtool --decompile crush.map > crush.txt +#. Decompile the CRUSH map to edit the CRUSH rule by running the following + command: -and adding the following line to the rule:: + .. prompt:: bash - step set_choose_tries 100 + crushtool --decompile crush.map > crush.txt -The relevant part of the ``crush.txt`` file should look something -like:: +#. 
Add the following line to the rule:: - rule erasurepool { - id 1 - type erasure - step set_chooseleaf_tries 5 - step set_choose_tries 100 - step take default - step chooseleaf indep 0 type host - step emit - } + step set_choose_tries 100 -It can then be compiled and tested again:: + The relevant part of the ``crush.txt`` file will resemble this:: - $ crushtool --compile crush.txt -o better-crush.map + rule erasurepool { + id 1 + type erasure + step set_chooseleaf_tries 5 + step set_choose_tries 100 + step take default + step chooseleaf indep 0 type host + step emit + } -When all mappings succeed, an histogram of the number of tries that -were necessary to find all of them can be displayed with the -``--show-choose-tries`` option of ``crushtool``:: +#. Recompile and retest the CRUSH rule: - $ crushtool -i better-crush.map --test --show-bad-mappings \ + .. prompt:: bash + + crushtool --compile crush.txt -o better-crush.map + +#. When all mappings succeed, display a histogram of the number of tries that + were necessary to find all of the mapping by using the + ``--show-choose-tries`` option of the ``crushtool`` command, as in the + following example: + + .. prompt:: bash + + crushtool -i better-crush.map --test --show-bad-mappings \ --show-choose-tries \ --rule 1 \ --num-rep 9 \ @@ -673,14 +771,12 @@ were necessary to find all of them can be displayed with the 104: 0 ... -It took 11 tries to map 42 PGs, 12 tries to map 44 PGs etc. The highest number of tries is the minimum value of ``set_choose_tries`` that prevents bad mappings (i.e. 103 in the above output because it did not take more than 103 tries for any PG to be mapped). + This output indicates that it took eleven tries to map forty-two PGs, twelve + tries to map forty-four PGs etc. The highest number of tries is the minimum + value of ``set_choose_tries`` that prevents bad mappings (for example, + ``103`` in the above output, because it did not take more than 103 tries for + any PG to be mapped). .. _check: ../../operations/placement-groups#get-the-number-of-placement-groups -.. _here: ../../configuration/pool-pg-config-ref .. _Placement Groups: ../../operations/placement-groups .. _Pool, PG and CRUSH Config Reference: ../../configuration/pool-pg-config-ref -.. _NTP: https://en.wikipedia.org/wiki/Network_Time_Protocol -.. _The Network Time Protocol: http://www.ntp.org/ -.. _Clock Settings: ../../configuration/mon-config-ref/#clock - - diff --git a/ceph/doc/radosgw/admin.rst b/ceph/doc/radosgw/admin.rst index 687409c1e..8d70252fe 100644 --- a/ceph/doc/radosgw/admin.rst +++ b/ceph/doc/radosgw/admin.rst @@ -476,23 +476,40 @@ commands. :: Rate Limit Management ===================== -The Ceph Object Gateway enables you to set rate limits on users and buckets. -Rate limit includes the maximum number of read ops and write ops per minute -and how many bytes per minute could be written or read per user or per bucket. -Requests that are using GET or HEAD method in the REST request are considered as "read requests", otherwise they are considered as "write requests". -Every Object Gateway tracks per user and bucket metrics separately, these metrics are not shared with other gateways. -That means that the desired limits configured should be divide by the number of active Object Gateways. -For example, if userA should be limited by 10 ops per minute and there are 2 Object Gateways in the cluster, -the limit over userA should be 5 (10 ops per minute / 2 RGWs). -If the requests are **not** balanced between RGWs, the rate limit may be underutilized. 
-For example, if the ops limit is 5 and there are 2 RGWs, **but** the Load Balancer send load only to one of those RGWs,
-The effective limit would be 5 ops, because this limit is enforced per RGW.
-If there is a limit reached for bucket not for user or vice versa the request would be cancelled as well.
-The bandwidth counting happens after the request is being accepted, as a result, even if in the middle of the request the bucket/user has reached its bandwidth limit this request will proceed.
-The RGW will keep a "debt" of used bytes more than the configured value and will prevent this user/bucket from sending more requests until there "debt" is being paid.
-The "debt" maximum size is twice the max-read/write-bytes per minute.
-If userA has 1 byte read limit per minute and this user tries to GET 1 GB object, the user will be able to do it.
-After userA completes this 1GB operation, the RGW will block the user request for up to 2 minutes until userA will be able to send GET request again.
+The Ceph Object Gateway makes it possible to set rate limits on users and
+buckets. "Rate limit" includes the maximum number of read operations (read
+ops) and write operations (write ops) per minute and the number of bytes per
+minute that can be written or read per user or per bucket.
+
+Operations that use the ``GET`` method or the ``HEAD`` method in their REST
+requests are "read requests". All other requests are "write requests".
+
+Each object gateway tracks per-user metrics separately from bucket metrics.
+These metrics are not shared with other gateways. The configured limits should
+be divided by the number of active object gateways. For example, if "user A" is
+to be limited to 10 ops per minute and there are two object gateways in the
+cluster, then the limit on "user A" should be ``5`` (10 ops per minute / 2
+RGWs). If the requests are **not** balanced between RGWs, the rate limit might
+be underutilized. For example: if the ops limit is ``5`` and there are two
+RGWs, **but** the Load Balancer sends load to only one of those RGWs, the
+effective limit is 5 ops, because this limit is enforced per RGW. If the rate
+limit that has been set for the bucket has been reached but the rate limit that
+has been set for the user has not been reached, then the request is cancelled.
+The contrary holds as well: if the rate limit that has been set for the user
+has been reached but the rate limit that has been set for the bucket has not
+been reached, then the request is cancelled.
+
+The accounting of bandwidth happens only after a request has been accepted.
+This means that requests will proceed even if the bucket rate limit or user
+rate limit is reached during the execution of the request. The RGW keeps track
+of a "debt" consisting of bytes used in excess of the configured value; users
+or buckets that incur this kind of debt are prevented from sending more
+requests until the "debt" has been repaid. The maximum size of the "debt" is
+twice the max-read/write-bytes per minute. If "user A" is subject to a 1-byte
+read limit per minute and they attempt to GET an object that is 1 GB in size,
+the ``GET`` request is allowed to proceed. After "user A" has completed this
+1 GB operation, RGW blocks the user's requests for up to two minutes. After
+this time has elapsed, "user A" will be able to send ``GET`` requests again.
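+
+For example, to give a hypothetical user ``userA`` a total budget of 10 read
+and 10 write ops per minute on a cluster with two active RGWs, each RGW would
+be configured with a limit of 5; a sketch of the corresponding commands is:
+
+.. prompt:: bash
+
+   radosgw-admin ratelimit set --ratelimit-scope=user --uid=userA \
+      --max-read-ops=5 --max-write-ops=5
+   radosgw-admin ratelimit enable --ratelimit-scope=user --uid=userA
+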
- **Bucket:** The ``--bucket`` option allows you to specify a rate limit for a diff --git a/ceph/doc/radosgw/config-ref.rst b/ceph/doc/radosgw/config-ref.rst index b6de649df..916ff4ff5 100644 --- a/ceph/doc/radosgw/config-ref.rst +++ b/ceph/doc/radosgw/config-ref.rst @@ -79,7 +79,7 @@ workload with a smaller number of buckets but higher number of objects (hundreds per bucket you would consider decreasing :confval:`rgw_lc_max_wp_worker` from the default value of 3. .. note:: When looking to tune either of these specific values please validate the - current Cluster performance and Ceph Object Gateway utilization before increasing. + current Cluster performance and Ceph Object Gateway utilization before increasing. Garbage Collection Settings =========================== @@ -97,8 +97,9 @@ To view the queue of objects awaiting garbage collection, execute the following radosgw-admin gc list -.. note:: specify ``--include-all`` to list all entries, including unexpired - +.. note:: Specify ``--include-all`` to list all entries, including unexpired + Garbage Collection objects. + Garbage collection is a background activity that may execute continuously or during times of low loads, depending upon how the administrator configures the Ceph Object Gateway. By default, the Ceph Object @@ -121,7 +122,9 @@ configuration parameters. :Tuning Garbage Collection for Delete Heavy Workloads: -As an initial step towards tuning Ceph Garbage Collection to be more aggressive the following options are suggested to be increased from their default configuration values:: +As an initial step towards tuning Ceph Garbage Collection to be more +aggressive the following options are suggested to be increased from their +default configuration values:: rgw_gc_max_concurrent_io = 20 rgw_gc_max_trim_chunk = 64 @@ -270,7 +273,7 @@ to support future methods of scheduling requests. Currently the scheduler defaults to a throttler which throttles the active connections to a configured limit. QoS based on mClock is currently in an *experimental* phase and not recommended for production yet. Current -implementation of *dmclock_client* op queue divides RGW Ops on admin, auth +implementation of *dmclock_client* op queue divides RGW ops on admin, auth (swift auth, sts) metadata & data requests. diff --git a/ceph/doc/radosgw/dynamicresharding.rst b/ceph/doc/radosgw/dynamicresharding.rst index db4ab57be..b8bd68d9e 100644 --- a/ceph/doc/radosgw/dynamicresharding.rst +++ b/ceph/doc/radosgw/dynamicresharding.rst @@ -6,38 +6,39 @@ RGW Dynamic Bucket Index Resharding .. versionadded:: Luminous -A large bucket index can lead to performance problems. In order -to address this problem we introduced bucket index sharding. +A large bucket index can lead to performance problems, which can +be addressed by sharding bucket indexes. Until Luminous, changing the number of bucket shards (resharding) -needed to be done offline. Starting with Luminous we support -online bucket resharding. +needed to be done offline, with RGW services disabled. +Since the Luminous release Ceph has supported online bucket resharding. Each bucket index shard can handle its entries efficiently up until -reaching a certain threshold number of entries. If this threshold is +reaching a certain threshold. If this threshold is exceeded the system can suffer from performance issues. 
The dynamic resharding feature detects this situation and automatically increases
-the number of shards used by the bucket index, resulting in a
-reduction of the number of entries in each bucket index shard. This
-process is transparent to the user. Write I/Os to the target bucket
-are blocked and read I/Os are not during resharding process.
+the number of shards used by a bucket's index, resulting in a
+reduction of the number of entries in each shard. This
+process is transparent to the user. Writes to the target bucket
+are blocked (but reads are not) briefly during the resharding process.
By default dynamic bucket index resharding can only increase the
number of bucket index shards to 1999, although this upper-bound is a
configuration parameter (see Configuration below). When
-possible, the process chooses a prime number of bucket index shards to
-spread the number of bucket index entries across the bucket index
+possible, the process chooses a prime number of shards in order to
+spread the number of entries across the bucket index
shards more evenly.
-The detection process runs in a background process that periodically
-scans all the buckets. A bucket that requires resharding is added to
-the resharding queue and will be scheduled to be resharded later. The
-reshard thread runs in the background and execute the scheduled
-resharding tasks, one at a time.
+Detection of resharding opportunities runs as a background process
+that periodically
+scans all buckets. A bucket that requires resharding is added to
+a queue. A thread runs in the background and processes the queued
+resharding tasks, one at a time and in order.
Multisite
=========
-Prior to the Reef release, RGW does not support dynamic resharding in a
+With Ceph releases prior to Reef, the Ceph Object Gateway (RGW) does not support
+dynamic resharding in a
multisite environment. For information on dynamic resharding, see
:ref:`Resharding ` in the RGW multisite documentation.
@@ -50,11 +51,11 @@ Enable/Disable dynamic bucket index resharding:
Configuration options that control the resharding process:
-- ``rgw_max_objs_per_shard``: maximum number of objects per bucket index shard before resharding is triggered, default: 100000 objects
+- ``rgw_max_objs_per_shard``: maximum number of objects per bucket index shard before resharding is triggered, default: 100000
-- ``rgw_max_dynamic_shards``: maximum number of shards that dynamic bucket index resharding can increase to, default: 1999
+- ``rgw_max_dynamic_shards``: maximum number of bucket index shards that dynamic resharding can increase to, default: 1999
-- ``rgw_reshard_bucket_lock_duration``: duration, in seconds, of lock on bucket obj during resharding, default: 360 seconds (i.e., 6 minutes)
+- ``rgw_reshard_bucket_lock_duration``: duration, in seconds, that writes to the bucket are locked during resharding, default: 360 (i.e., 6 minutes)
- ``rgw_reshard_thread_interval``: maximum time, in seconds, between rounds of resharding queue processing, default: 600 seconds (i.e., 10 minutes)
@@ -91,9 +92,9 @@ Bucket resharding status
# radosgw-admin reshard status --bucket
-The output is a json array of 3 objects (reshard_status, new_bucket_instance_id, num_shards) per shard.
+The output is a JSON array of 3 objects (reshard_status, new_bucket_instance_id, num_shards) per shard.
-For example, the output at different Dynamic Resharding stages is shown below:
+For example, the output at each dynamic resharding stage is shown below:
``1.
Before resharding occurred:`` :: @@ -122,7 +123,7 @@ For example, the output at different Dynamic Resharding stages is shown below: } ] -``3, After resharding completed:`` +``3. After resharding completed:`` :: [ @@ -142,7 +143,7 @@ For example, the output at different Dynamic Resharding stages is shown below: Cancel pending bucket resharding -------------------------------- -Note: Ongoing bucket resharding operations cannot be cancelled. :: +Note: Bucket resharding operations cannot be cancelled while executing. :: # radosgw-admin reshard cancel --bucket @@ -153,25 +154,24 @@ Manual immediate bucket resharding # radosgw-admin bucket reshard --bucket --num-shards -When choosing a number of shards, the administrator should keep a -number of items in mind. Ideally the administrator is aiming for no -more than 100000 entries per shard, now and through some future point -in time. +When choosing a number of shards, the administrator must anticipate each +bucket's peak number of objects. Ideally one should aim for no +more than 100000 entries per shard at any given time. -Additionally, bucket index shards that are prime numbers tend to work -better in evenly distributing bucket index entries across the -shards. For example, 7001 bucket index shards is better than 7000 +Additionally, bucket index shards that are prime numbers are more effective +in evenly distributing bucket index entries. +For example, 7001 bucket index shards is better than 7000 since the former is prime. A variety of web sites have lists of prime -numbers; search for "list of prime numbers" withy your favorite web +numbers; search for "list of prime numbers" with your favorite search engine to locate some web sites. Troubleshooting =============== Clusters prior to Luminous 12.2.11 and Mimic 13.2.5 left behind stale bucket -instance entries, which were not automatically cleaned up. The issue also affected -LifeCycle policies, which were not applied to resharded buckets anymore. Both of -these issues can be worked around using a couple of radosgw-admin commands. +instance entries, which were not automatically cleaned up. This issue also affected +LifeCycle policies, which were no longer applied to resharded buckets. Both of +these issues could be worked around by running ``radosgw-admin`` commands. Stale instance management ------------------------- @@ -183,7 +183,7 @@ List the stale instances in a cluster that are ready to be cleaned up. # radosgw-admin reshard stale-instances list Clean up the stale instances in a cluster. Note: cleanup of these -instances should only be done on a single site cluster. +instances should only be done on a single-site cluster. :: @@ -193,11 +193,12 @@ instances should only be done on a single site cluster. Lifecycle fixes --------------- -For clusters that had resharded instances, it is highly likely that the old +For clusters with resharded instances, it is highly likely that the old lifecycle processes would have flagged and deleted lifecycle processing as the -bucket instance changed during a reshard. While this is fixed for newer clusters -(from Mimic 13.2.6 and Luminous 12.2.12), older buckets that had lifecycle policies and -that have undergone resharding will have to be manually fixed. +bucket instance changed during a reshard. While this is fixed for buckets +deployed on newer Ceph releases (from Mimic 13.2.6 and Luminous 12.2.12), +older buckets that had lifecycle policies and that have undergone +resharding must be fixed manually. 
The command to do so is: @@ -206,8 +207,8 @@ The command to do so is: # radosgw-admin lc reshard fix --bucket {bucketname} -As a convenience wrapper, if the ``--bucket`` argument is dropped then this -command will try and fix lifecycle policies for all the buckets in the cluster. +If the ``--bucket`` argument is not provided, this +command will try to fix lifecycle policies for all the buckets in the cluster. Object Expirer fixes -------------------- @@ -217,7 +218,7 @@ been dropped from the log pool and never deleted after the bucket was resharded. This would happen if their expiration time was before the cluster was upgraded, but if their expiration was after the upgrade the objects would be correctly handled. To manage these expire-stale -objects, radosgw-admin provides two subcommands. +objects, ``radosgw-admin`` provides two subcommands. Listing: diff --git a/ceph/doc/radosgw/multisite.rst b/ceph/doc/radosgw/multisite.rst index b1ca121fe..c7627371d 100644 --- a/ceph/doc/radosgw/multisite.rst +++ b/ceph/doc/radosgw/multisite.rst @@ -770,7 +770,13 @@ to a multi-site system, follow these steps: radosgw-admin zonegroup rename --rgw-zonegroup default --zonegroup-new-name= radosgw-admin zone rename --rgw-zone default --zone-new-name us-east-1 --rgw-zonegroup= -3. Configure the master zonegroup. Replace ```` with the realm name or +3. Rename the default zonegroup's ``api_name``. Replace ```` with the zonegroup name: + + .. prompt:: bash # + + radosgw-admin zonegroup modify --api-name= --rgw-zonegroup= + +4. Configure the master zonegroup. Replace ```` with the realm name or zonegroup name. Replace ```` with the fully qualified domain name(s) in the zonegroup: @@ -778,7 +784,7 @@ to a multi-site system, follow these steps: radosgw-admin zonegroup modify --rgw-realm= --rgw-zonegroup= --endpoints http://:80 --master --default -4. Configure the master zone. Replace ```` with the realm name, zone +5. Configure the master zone. Replace ```` with the realm name, zone name, or zonegroup name. Replace ```` with the fully qualified domain name(s) in the zonegroup: @@ -789,7 +795,7 @@ to a multi-site system, follow these steps: --access-key= --secret= \ --master --default -5. Create a system user. Replace ```` with the username. Replace +6. Create a system user. Replace ```` with the username. Replace ```` with a display name. The display name is allowed to contain spaces: @@ -800,13 +806,13 @@ to a multi-site system, follow these steps: --access-key= \ --secret= --system -6. Commit the updated configuration: +7. Commit the updated configuration: .. prompt:: bash # radosgw-admin period update --commit -7. Restart the Ceph Object Gateway: +8. Restart the Ceph Object Gateway: .. prompt:: bash # @@ -1588,7 +1594,7 @@ Zone Features Some multisite features require support from all zones before they can be enabled. Each zone lists its ``supported_features``, and each zonegroup lists its ``enabled_features``. Before a feature can be enabled in the zonegroup, it must be supported by all of its zones. -On creation of new zones and zonegroups, all known features are supported/enabled. After upgrading an existing multisite configuration, however, new features must be enabled manually. +On creation of new zones and zonegroups, all known features are supported and some features (see table below) are enabled by default. After upgrading an existing multisite configuration, however, new features must be enabled manually. 
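+
+For example, after all zones have been upgraded, a feature such as
+``resharding`` can typically be enabled at the zonegroup level and then
+committed, as in the following sketch (the zonegroup name ``us`` is
+illustrative):
+
+.. prompt:: bash #
+
+   radosgw-admin zonegroup modify --rgw-zonegroup=us --enable-feature=resharding
+   radosgw-admin period update --commit
+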
Supported Features ------------------ diff --git a/ceph/doc/radosgw/notifications.rst b/ceph/doc/radosgw/notifications.rst index ad36029a2..1d18772b2 100644 --- a/ceph/doc/radosgw/notifications.rst +++ b/ceph/doc/radosgw/notifications.rst @@ -188,8 +188,7 @@ Request parameters: specified CA will be used to authenticate the broker. The default CA will not be used. - amqp-exchange: The exchanges must exist and must be able to route messages - based on topics. This parameter is mandatory. Different topics that point - to the same endpoint must use the same exchange. + based on topics. This parameter is mandatory. - amqp-ack-level: No end2end acking is required. Messages may persist in the broker before being delivered to their final destinations. Three ack methods exist: diff --git a/ceph/doc/radosgw/s3-notification-compatibility.rst b/ceph/doc/radosgw/s3-notification-compatibility.rst index 9a101306a..1627ed0c4 100644 --- a/ceph/doc/radosgw/s3-notification-compatibility.rst +++ b/ceph/doc/radosgw/s3-notification-compatibility.rst @@ -13,7 +13,7 @@ Supported Destination --------------------- AWS supports: **SNS**, **SQS** and **Lambda** as possible destinations (AWS internal destinations). -Currently, we support: **HTTP/S**, **Kafka** and **AMQP**. And also support pulling and acking of events stored in Ceph (as an internal destination). +Currently, we support: **HTTP/S**, **Kafka** and **AMQP**. We are using the **SNS** ARNs to represent the **HTTP/S**, **Kafka** and **AMQP** destinations. diff --git a/ceph/doc/radosgw/s3.rst b/ceph/doc/radosgw/s3.rst index 694f89167..cb5eb3adb 100644 --- a/ceph/doc/radosgw/s3.rst +++ b/ceph/doc/radosgw/s3.rst @@ -91,14 +91,8 @@ The following common request header fields are not supported: +----------------------------+------------+ | Name | Type | +============================+============+ -| **Server** | Response | -+----------------------------+------------+ -| **x-amz-delete-marker** | Response | -+----------------------------+------------+ | **x-amz-id-2** | Response | +----------------------------+------------+ -| **x-amz-version-id** | Response | -+----------------------------+------------+ .. _Amazon S3 API: http://docs.aws.amazon.com/AmazonS3/latest/API/APIRest.html .. _S3 Notification Compatibility: ../s3-notification-compatibility diff --git a/ceph/doc/releases/index.rst b/ceph/doc/releases/index.rst index 815a0282b..47279c8c5 100644 --- a/ceph/doc/releases/index.rst +++ b/ceph/doc/releases/index.rst @@ -21,6 +21,7 @@ security fixes. :maxdepth: 1 :hidden: + Reef (v18.2.*) Quincy (v17.2.*) Pacific (v16.2.*) @@ -58,8 +59,11 @@ receive bug fixes or backports). Release timeline ---------------- -.. ceph_timeline_gantt:: releases.yml quincy pacific -.. ceph_timeline:: releases.yml quincy pacific +.. ceph_timeline_gantt:: releases.yml reef quincy +.. ceph_timeline:: releases.yml reef quincy + +.. _Reef: reef +.. _18.2.0: reef#v18-2-0-reef .. _Quincy: quincy .. _17.2.0: quincy#v17-2-0-quincy diff --git a/ceph/doc/releases/reef.rst b/ceph/doc/releases/reef.rst new file mode 100644 index 000000000..6f4f82783 --- /dev/null +++ b/ceph/doc/releases/reef.rst @@ -0,0 +1,551 @@ +==== +Reef +==== + +Reef is the 18th stable release of Ceph. It is named after the reef squid +(Sepioteuthis). + +v18.2.0 Reef +============ + +This is the first stable release of Ceph Reef. + +.. important:: + + We are unable to build Ceph on Debian stable (bookworm) for the 18.2.0 + release because of Debian bug + https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1030129. 
We will build as + soon as this bug is resolved in Debian stable. + + *last updated 2023 Aug 04* + +Major Changes from Quincy +-------------------------- + +Highlights +~~~~~~~~~~ + +See the relevant sections below for more details on these changes. + +* **RADOS** FileStore is not supported in Reef. +* **RADOS:** RocksDB has been upgraded to version 7.9.2. +* **RADOS:** There have been significant improvements to RocksDB iteration overhead and performance. +* **RADOS:** The ``perf dump`` and ``perf schema`` commands have been deprecated in + favor of the new ``counter dump`` and ``counter schema`` commands. +* **RADOS:** Cache tiering is now deprecated. +* **RADOS:** A new feature, the "read balancer", is now available, which allows users to balance primary PGs per pool on their clusters. +* **RGW:** Bucket resharding is now supported for multi-site configurations. +* **RGW:** There have been significant improvements to the stability and consistency of multi-site replication. +* **RGW:** Compression is now supported for objects uploaded with Server-Side Encryption. +* **Dashboard:** There is a new Dashboard page with improved layout. Active alerts and some important charts are now displayed inside cards. +* **RBD:** Support for layered client-side encryption has been added. +* **Telemetry**: Users can now opt in to participate in a leaderboard in the telemetry public dashboards. + +CephFS +~~~~~~ + +* CephFS: The ``mds_max_retries_on_remount_failure`` option has been renamed to + ``client_max_retries_on_remount_failure`` and moved from ``mds.yaml.in`` to + ``mds-client.yaml.in``. This change was made because the option has always + been used only by the MDS client. +* CephFS: It is now possible to delete the recovered files in the + ``lost+found`` directory after a CephFS post has been recovered in accordance + with disaster recovery procedures. +* The ``AT_NO_ATTR_SYNC`` macro has been deprecated in favor of the standard + ``AT_STATX_DONT_SYNC`` macro. The ``AT_NO_ATTR_SYNC`` macro will be removed + in the future. + +Dashboard +~~~~~~~~~ + +* There is a new Dashboard page with improved layout. Active alerts + and some important charts are now displayed inside cards. + +* Cephx Auth Management: There is a new section dedicated to listing and + managing Ceph cluster users. + +* RGW Server Side Encryption: The SSE-S3 and KMS encryption of rgw buckets can + now be configured at the time of bucket creation. + +* RBD Snapshot mirroring: Snapshot mirroring can now be configured through UI. + Snapshots can now be scheduled. + +* 1-Click OSD Creation Wizard: OSD creation has been broken into 3 options: + + #. Cost/Capacity Optimized: Use all HDDs + + #. Throughput Optimized: Combine HDDs and SSDs + + #. IOPS Optimized: Use all NVMes + + The current OSD-creation form has been moved to the Advanced section. + +* Centralized Logging: There is now a view that collects all the logs from + the Ceph cluster. + +* Accessibility WCAG-AA: Dashboard is WCAG 2.1 level A compliant and therefore + improved for blind and visually impaired Ceph users. + +* Monitoring & Alerting + + * Ceph-exporter: Now the performance metrics for Ceph daemons are + exported by ceph-exporter, which deploys on each daemon rather than + using prometheus exporter. This will reduce performance bottlenecks. 
+ + * Monitoring stacks updated: + + * Prometheus 2.43.0 + + * Node-exporter 1.5.0 + + * Grafana 9.4.7 + + * Alertmanager 0.25.0 + +MGR +~~~ + +* mgr/snap_schedule: The snap-schedule manager module now retains one snapshot + less than the number mentioned against the config option + ``mds_max_snaps_per_dir``. This means that a new snapshot can be created and + retained during the next schedule run. +* The ``ceph mgr dump`` command now outputs ``last_failure_osd_epoch`` and + ``active_clients`` fields at the top level. Previously, these fields were + output under the ``always_on_modules`` field. + +RADOS +~~~~~ + +* FileStore is not supported in Reef. +* RocksDB has been upgraded to version 7.9.2, which incorporates several + performance improvements and features. This is the first release that can + tune RocksDB settings per column family, which allows for more granular + tunings to be applied to different kinds of data stored in RocksDB. New + default settings have been used to optimize performance for most workloads, with a + slight penalty in some use cases. This slight penalty is outweighed by large + improvements in compactions and write amplification in use cases such as RGW + (up to a measured 13.59% improvement in 4K random write IOPs). +* Trimming of PGLog dups is now controlled by the size rather than the version. + This change fixes the PGLog inflation issue that was happening when the + online (in OSD) trimming got jammed after a PG split operation. Also, a new + offline mechanism has been added: ``ceph-objectstore-tool`` has a new + operation called ``trim-pg-log-dups`` that targets situations in which an OSD + is unable to boot because of the inflated dups. In such situations, the "You + can be hit by THE DUPS BUG" warning is visible in OSD logs. Relevant tracker: + https://tracker.ceph.com/issues/53729 +* The RADOS Python bindings are now able to process (opt-in) omap keys as bytes + objects. This allows interacting with RADOS omap keys that are not + decodable as UTF-8 strings. +* mClock Scheduler: The mClock scheduler (the default scheduler in Quincy) has + undergone significant usability and design improvements to address the slow + backfill issue. The following is a list of some important changes: + + * The ``balanced`` profile is set as the default mClock profile because it + represents a compromise between prioritizing client I/O and prioritizing + recovery I/O. Users can then choose either the ``high_client_ops`` profile + to prioritize client I/O or the ``high_recovery_ops`` profile to prioritize + recovery I/O. + * QoS parameters including ``reservation`` and ``limit`` are now specified in + terms of a fraction (range: 0.0 to 1.0) of the OSD's IOPS capacity. + * The cost parameters (``osd_mclock_cost_per_io_usec_*`` and + ``osd_mclock_cost_per_byte_usec_*``) have been removed. The cost of an + operation is now a function of the random IOPS and maximum sequential + bandwidth capability of the OSD's underlying device. + * Degraded object recovery is given higher priority than misplaced + object recovery because degraded objects present a data safety issue that + is not present with objects that are merely misplaced. As a result, + backfilling operations with the ``balanced`` and ``high_client_ops`` mClock + profiles might progress more slowly than in the past, when backfilling + operations used the 'WeightedPriorityQueue' (WPQ) scheduler. + * The QoS allocations in all the mClock profiles are optimized in + accordance with the above fixes and enhancements. 
+ * For more details, see: + https://docs.ceph.com/en/reef/rados/configuration/mclock-config-ref/ +* A new feature, the "read balancer", is now available, which allows + users to balance primary PGs per pool on their clusters. The read balancer is + currently available as an offline option via the ``osdmaptool``. By providing + a copy of their osdmap and a pool they want balanced to the ``osdmaptool``, users + can generate a preview of optimal primary PG mappings that they can then choose to + apply to their cluster. For more details, see + https://docs.ceph.com/en/latest/dev/balancer-design/#read-balancing +* The ``active_clients`` array displayed by the ``ceph mgr dump`` command now + has a ``name`` field that shows the name of the manager module that + registered a RADOS client. Previously, the ``active_clients`` array showed + the address of a module's RADOS client, but not the name of the module. +* The ``perf dump`` and ``perf schema`` commands have been deprecated in + favor of the new ``counter dump`` and ``counter schema`` commands. These new + commands add support for labeled perf counters and also emit existing + unlabeled perf counters. Some unlabeled perf counters became labeled in this + release, and more will be labeled in future releases; such converted perf + counters are no longer emitted by the ``perf dump`` and ``perf schema`` + commands. +* Cache tiering is now deprecated. +* The SPDK backend for BlueStore can now connect to an NVMeoF target. This + is not an officially supported feature. + +RBD +~~~ + +* The semantics of compare-and-write C++ API (`Image::compare_and_write` and + `Image::aio_compare_and_write` methods) now match those of C API. Both + compare and write steps operate only on len bytes even if the buffers + associated with them are larger. The previous behavior of comparing up to the + size of the compare buffer was prone to subtle breakage upon straddling a + stripe unit boundary. +* The ``compare-and-write`` operation is no longer limited to 512-byte + sectors. Assuming proper alignment, it now allows operating on stripe units + (4MB by default). +* There is a new ``rbd_aio_compare_and_writev`` API method that supports + scatter/gather on compare buffers as well as on write buffers. This + complements the existing ``rbd_aio_readv`` and ``rbd_aio_writev`` methods. +* The ``rbd device unmap`` command now has a ``--namespace`` option. + Support for namespaces was added to RBD in Nautilus 14.2.0, and since then it + has been possible to map and unmap images in namespaces using the + ``image-spec`` syntax. However, the corresponding option available in most + other commands was missing. +* All rbd-mirror daemon perf counters have become labeled and are now + emitted only by the new ``counter dump`` and ``counter schema`` commands. As + part of the conversion, many were also renamed in order to better + disambiguate journal-based and snapshot-based mirroring. +* The list-watchers C++ API (`Image::list_watchers`) now clears the passed + `std::list` before appending to it. This aligns with the semantics of the C + API (``rbd_watchers_list``). +* Trailing newline in passphrase files (for example: the + ```` argument of the ``rbd encryption format`` command and + the ``--encryption-passphrase-file`` option of other commands) is no longer + stripped. +* Support for layered client-side encryption has been added. 
It is now + possible to encrypt cloned images with a distinct encryption format and + passphrase, differing from that of the parent image and from that of every + other cloned image. The efficient copy-on-write semantics intrinsic to + unformatted (regular) cloned images have been retained. + +RGW +~~~ + +* Bucket resharding is now supported for multi-site configurations. This + feature is enabled by default for new deployments. Existing deployments must + enable the ``resharding`` feature manually after all zones have upgraded. + See https://docs.ceph.com/en/reef/radosgw/multisite/#zone-features for + details. +* The RGW policy parser now rejects unknown principals by default. If you are + mirroring policies between RGW and AWS, you might want to set + ``rgw_policy_reject_invalid_principals`` to ``false``. This change affects + only newly set policies, not policies that are already in place. +* RGW's default backend for ``rgw_enable_ops_log`` has changed from ``RADOS`` + to ``file``. The default value of ``rgw_ops_log_rados`` is now ``false``, and + ``rgw_ops_log_file_path`` now defaults to + ``/var/log/ceph/ops-log-$cluster-$name.log``. +* RGW's pubsub interface now returns boolean fields using ``bool``. Before this + change, ``/topics/`` returned ``stored_secret`` and + ``persistent`` using a string of ``"true"`` or ``"false"`` that contains + enclosing quotation marks. After this change, these fields are returned + without enclosing quotation marks so that the fields can be decoded as + boolean values in JSON. The same is true of the ``is_truncated`` field + returned by ``/subscriptions/``. +* RGW's response of ``Action=GetTopicAttributes&TopicArn=`` REST + API now returns ``HasStoredSecret`` and ``Persistent`` as boolean in the JSON + string that is encoded in ``Attributes/EndPoint``. +* All boolean fields that were previously rendered as strings by the + ``rgw-admin`` command when the JSON format was used are now rendered as + boolean. If your scripts and tools rely on this behavior, update them + accordingly. The following is a list of the field names impacted by this + change: + + * ``absolute`` + * ``add`` + * ``admin`` + * ``appendable`` + * ``bucket_key_enabled`` + * ``delete_marker`` + * ``exists`` + * ``has_bucket_info`` + * ``high_precision_time`` + * ``index`` + * ``is_master`` + * ``is_prefix`` + * ``is_truncated`` + * ``linked`` + * ``log_meta`` + * ``log_op`` + * ``pending_removal`` + * ``read_only`` + * ``retain_head_object`` + * ``rule_exist`` + * ``start_with_full_sync`` + * ``sync_from_all`` + * ``syncstopped`` + * ``system`` + * ``truncated`` + * ``user_stats_sync`` +* The Beast front end's HTTP access log line now uses a new + ``debug_rgw_access`` configurable. It has the same defaults as + ``debug_rgw``, but it can be controlled independently. +* The pubsub functionality for storing bucket notifications inside Ceph + has been removed. As a result, the pubsub zone should not be used anymore. + The following have also been removed: the REST operations, ``radosgw-admin`` + commands for manipulating subscriptions, fetching the notifications, and + acking the notifications. + + If the endpoint to which the notifications are sent is down or disconnected, + we recommend that you use persistent notifications to guarantee their + delivery. If the system that consumes the notifications has to pull them + (instead of the notifications being pushed to the system), use an external + message bus (for example, RabbitMQ or Kafka) for that purpose. 
+* The serialized format of notification and topics has changed. This means + that new and updated topics will be unreadable by old RGWs. We recommend + completing the RGW upgrades before creating or modifying any notification + topics. +* Compression is now supported for objects uploaded with Server-Side + Encryption. When both compression and encryption are enabled, compression is + applied before encryption. Earlier releases of multisite do not replicate + such objects correctly, so all zones must upgrade to Reef before enabling the + `compress-encrypted` zonegroup feature: see + https://docs.ceph.com/en/reef/radosgw/multisite/#zone-features and note the + security considerations. + +Telemetry +~~~~~~~~~ + +* Users who have opted in to telemetry can also opt in to + participate in a leaderboard in the telemetry public dashboards + (https://telemetry-public.ceph.com/). In addition, users are now able to + provide a description of their cluster that will appear publicly in the + leaderboard. For more details, see: + https://docs.ceph.com/en/reef/mgr/telemetry/#leaderboard. To see a sample + report, run ``ceph telemetry preview``. To opt in to telemetry, run ``ceph + telemetry on``. To opt in to the leaderboard, run ``ceph config set mgr + mgr/telemetry/leaderboard true``. To add a leaderboard description, run + ``ceph config set mgr mgr/telemetry/leaderboard_description ‘Cluster + description’`` (entering your own cluster description). + +Upgrading from Pacific or Quincy +-------------------------------- + +Before starting, make sure your cluster is stable and healthy (no down or recovering OSDs). (This is optional, but recommended.) You can disable the autoscaler for all pools during the upgrade using the noautoscale flag. + + +.. note:: + + You can monitor the progress of your upgrade at each stage with the ``ceph versions`` command, which will tell you what ceph version(s) are running for each type of daemon. + +Upgrading cephadm clusters +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your cluster is deployed with cephadm (first introduced in Octopus), then the upgrade process is entirely automated. To initiate the upgrade, + + .. prompt:: bash # + + ceph orch upgrade start --image quay.io/ceph/ceph:v18.2.0 + +The same process is used to upgrade to future minor releases. + +Upgrade progress can be monitored with + + .. prompt:: bash # + + ceph orch upgrade status + +Upgrade progress can also be monitored with `ceph -s` (which provides a simple progress bar) or more verbosely with + + .. prompt:: bash # + + ceph -W cephadm + +The upgrade can be paused or resumed with + + .. prompt:: bash # + + ceph orch upgrade pause # to pause + ceph orch upgrade resume # to resume + +or canceled with + +.. prompt:: bash # + + ceph orch upgrade stop + +Note that canceling the upgrade simply stops the process; there is no ability to downgrade back to Pacific or Quincy. + +Upgrading non-cephadm clusters +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + + 1. If your cluster is running Pacific (16.2.x) or later, you might choose to first convert it to use cephadm so that the upgrade to Reef is automated (see above). + For more information, see https://docs.ceph.com/en/reef/cephadm/adoption/. + + 2. If your cluster is running Pacific (16.2.x) or later, systemd unit file names have changed to include the cluster fsid. 
To find the correct systemd unit file name for your cluster, run following command: + + ``` + systemctl -l | grep + ``` + + Example: + + ``` + $ systemctl -l | grep mon | grep active + ceph-6ce0347c-314a-11ee-9b52-000af7995d6c@mon.f28-h21-000-r630.service loaded active running Ceph mon.f28-h21-000-r630 for 6ce0347c-314a-11ee-9b52-000af7995d6c + ``` + +#. Set the `noout` flag for the duration of the upgrade. (Optional, but recommended.) + + .. prompt:: bash # + + ceph osd set noout + +#. Upgrade monitors by installing the new packages and restarting the monitor daemons. For example, on each monitor host + + .. prompt:: bash # + + systemctl restart ceph-mon.target + + Once all monitors are up, verify that the monitor upgrade is complete by looking for the `reef` string in the mon map. The command + + .. prompt:: bash # + + ceph mon dump | grep min_mon_release + + should report: + + .. prompt:: bash # + + min_mon_release 18 (reef) + + If it does not, that implies that one or more monitors hasn't been upgraded and restarted and/or the quorum does not include all monitors. + +#. Upgrade `ceph-mgr` daemons by installing the new packages and restarting all manager daemons. For example, on each manager host, + + .. prompt:: bash # + + systemctl restart ceph-mgr.target + + Verify the `ceph-mgr` daemons are running by checking `ceph -s`: + + .. prompt:: bash # + + ceph -s + + :: + + ... + services: + mon: 3 daemons, quorum foo,bar,baz + mgr: foo(active), standbys: bar, baz + ... + +#. Upgrade all OSDs by installing the new packages and restarting the ceph-osd daemons on all OSD hosts + + .. prompt:: bash # + + systemctl restart ceph-osd.target + +#. Upgrade all CephFS MDS daemons. For each CephFS file system, + + #. Disable standby_replay: + + .. prompt:: bash # + + ceph fs set allow_standby_replay false + + #. If upgrading from Pacific <=16.2.5: + + .. prompt:: bash # + + ceph config set mon mon_mds_skip_sanity true + + #. Reduce the number of ranks to 1. (Make note of the original number of MDS daemons first if you plan to restore it later.) + + .. prompt:: bash # + + ceph status # ceph fs set max_mds 1 + + #. Wait for the cluster to deactivate any non-zero ranks by periodically checking the status + + .. prompt:: bash # + + ceph status + + #. Take all standby MDS daemons offline on the appropriate hosts with + + .. prompt:: bash # + + systemctl stop ceph-mds@ + + #. Confirm that only one MDS is online and is rank 0 for your FS + + .. prompt:: bash # + + ceph status + + #. Upgrade the last remaining MDS daemon by installing the new packages and restarting the daemon + + .. prompt:: bash # + + systemctl restart ceph-mds.target + + #. Restart all standby MDS daemons that were taken offline + + .. prompt:: bash # + + systemctl start ceph-mds.target + + #. Restore the original value of `max_mds` for the volume + + .. prompt:: bash # + + ceph fs set max_mds + + #. If upgrading from Pacific <=16.2.5 (followup to step 5.2): + + .. prompt:: bash # + + ceph config set mon mon_mds_skip_sanity false + +#. Upgrade all radosgw daemons by upgrading packages and restarting daemons on all hosts + + .. prompt:: bash # + + systemctl restart ceph-radosgw.target + +#. Complete the upgrade by disallowing pre-Reef OSDs and enabling all new Reef-only functionality + + .. prompt:: bash # + + ceph osd require-osd-release reef + +#. If you set `noout` at the beginning, be sure to clear it with + + .. prompt:: bash # + + ceph osd unset noout + +#. 
Consider transitioning your cluster to use the cephadm deployment and orchestration framework to simplify cluster management and future upgrades. For more information on converting an existing cluster to cephadm, see https://docs.ceph.com/en/reef/cephadm/adoption/. + +Post-upgrade +~~~~~~~~~~~~ + +#. Verify the cluster is healthy with `ceph health`. If your cluster is running Filestore, and you are upgrading directly from Pacific to Reef, a deprecation warning is expected. This warning can be temporarily muted using the following command + + .. prompt:: bash # + + ceph health mute OSD_FILESTORE + +#. Consider enabling the `telemetry module `_ to send anonymized usage statistics and crash information to the Ceph upstream developers. To see what would be reported (without actually sending any information to anyone), + + .. prompt:: bash # + + ceph telemetry preview-all + + If you are comfortable with the data that is reported, you can opt-in to automatically report the high-level cluster metadata with + + .. prompt:: bash # + + ceph telemetry on + + The public dashboard that aggregates Ceph telemetry can be found at https://telemetry-public.ceph.com/. + +Upgrading from pre-Pacific releases (like Octopus) +__________________________________________________ + +You **must** first upgrade to Pacific (16.2.z) or Quincy (17.2.z) before upgrading to Reef. diff --git a/ceph/doc/releases/releases.yml b/ceph/doc/releases/releases.yml index 7a01c6b42..5eaffdc65 100644 --- a/ceph/doc/releases/releases.yml +++ b/ceph/doc/releases/releases.yml @@ -12,6 +12,11 @@ # If a version might represent an actual number (e.g. 0.80) quote it. # releases: + reef: + target_eol: 2025-08-01 + releases: + - version: 18.2.0 + released: 2023-08-07 quincy: target_eol: 2024-06-01 releases: @@ -29,7 +34,7 @@ releases: released: 2022-04-19 pacific: - target_eol: 2023-06-01 + target_eol: 2023-10-01 releases: - version: 16.2.11 released: 2023-01-26 diff --git a/ceph/doc/start/documenting-ceph.rst b/ceph/doc/start/documenting-ceph.rst index f9b13452d..02d4dccc4 100644 --- a/ceph/doc/start/documenting-ceph.rst +++ b/ceph/doc/start/documenting-ceph.rst @@ -973,6 +973,15 @@ convention was preferred because it made the documents more readable in a command line interface. As of 2023, though, we have no preference for one over the other. Use whichever convention makes the text easier to read. +Using a part of a sentence as a hyperlink, `like this `_, is +discouraged. The convention of writing "See X" is preferred. Here are some +preferred formulations: + +#. For more information, see `docs.ceph.com `_. + +#. See `docs.ceph.com `_. + + Quirks of ReStructured Text --------------------------- @@ -981,7 +990,8 @@ External Links .. _external_link_with_inline_text: -This is the formula for links to addresses external to the Ceph documentation: +Use the formula immediately below to render links that direct the reader to +addresses external to the Ceph documentation: :: @@ -994,10 +1004,13 @@ This is the formula for links to addresses external to the Ceph documentation: To link to addresses that are external to the Ceph documentation, include a space between the inline text and the angle bracket that precedes the - external address. This is precisely the opposite of :ref:`the convention for - inline text that links to a location inside the Ceph - documentation`. If this seems inconsistent - and confusing to you, then you're right. It is inconsistent and confusing. + external address. 
This is precisely the opposite of the convention for + inline text that links to a location inside the Ceph documentation. See + :ref:`here ` for an exemplar of this + convention. + + If this seems inconsistent and confusing to you, then you're right. It is + inconsistent and confusing. See also ":ref:`External Hyperlink Example`". diff --git a/ceph/doc/start/hardware-recommendations.rst b/ceph/doc/start/hardware-recommendations.rst index c759d7495..a63b5a457 100644 --- a/ceph/doc/start/hardware-recommendations.rst +++ b/ceph/doc/start/hardware-recommendations.rst @@ -1,66 +1,83 @@ .. _hardware-recommendations: ========================== - Hardware Recommendations + hardware recommendations ========================== -Ceph was designed to run on commodity hardware, which makes building and -maintaining petabyte-scale data clusters economically feasible. -When planning out your cluster hardware, you will need to balance a number -of considerations, including failure domains and potential performance -issues. Hardware planning should include distributing Ceph daemons and +Ceph is designed to run on commodity hardware, which makes building and +maintaining petabyte-scale data clusters flexible and economically feasible. +When planning your cluster's hardware, you will need to balance a number +of considerations, including failure domains, cost, and performance. +Hardware planning should include distributing Ceph daemons and other processes that use Ceph across many hosts. Generally, we recommend running Ceph daemons of a specific type on a host configured for that type -of daemon. We recommend using other hosts for processes that utilize your -data cluster (e.g., OpenStack, CloudStack, etc). +of daemon. We recommend using separate hosts for processes that utilize your +data cluster (e.g., OpenStack, CloudStack, Kubernetes, etc). +The requirements of one Ceph cluster are not the same as the requirements of +another, but below are some general guidelines. -.. tip:: Check out the `Ceph blog`_ too. + +.. tip:: Check out the `Ceph blog`_ too. CPU === -CephFS metadata servers (MDS) are CPU-intensive. CephFS metadata servers (MDS) -should therefore have quad-core (or better) CPUs and high clock rates (GHz). OSD -nodes need enough processing power to run the RADOS service, to calculate data +CephFS Metadata Servers (MDS) are CPU-intensive. They are +single-threaded and perform best with CPUs with a high clock rate (GHz). MDS +servers do not need a large number of CPU cores unless they are also hosting other +services, such as SSD OSDs for the CephFS metadata pool. +OSD nodes need enough processing power to run the RADOS service, to calculate data placement with CRUSH, to replicate data, and to maintain their own copies of the cluster map. -The requirements of one Ceph cluster are not the same as the requirements of -another, but here are some general guidelines. - -In earlier versions of Ceph, we would make hardware recommendations based on -the number of cores per OSD, but this cores-per-OSD metric is no longer as -useful a metric as the number of cycles per IOP and the number of IOPs per OSD. -For example, for NVMe drives, Ceph can easily utilize five or six cores on real +With earlier releases of Ceph, we would make hardware recommendations based on +the number of cores per OSD, but this cores-per-osd metric is no longer as +useful a metric as the number of cycles per IOP and the number of IOPS per OSD.
+For example, with NVMe OSD drives, Ceph can easily utilize five or six cores on real clusters and up to about fourteen cores on single OSDs in isolation. So cores per OSD are no longer as pressing a concern as they were. When selecting -hardware, select for IOPs per core. +hardware, select for IOPS per core. -Monitor nodes and manager nodes have no heavy CPU demands and require only -modest processors. If your host machines will run CPU-intensive processes in +.. tip:: When we speak of CPU *cores*, we mean *threads* when hyperthreading + is enabled. Hyperthreading is usually beneficial for Ceph servers. + +Monitor nodes and Manager nodes do not have heavy CPU demands and require only +modest processors. If your hosts will run CPU-intensive processes in addition to Ceph daemons, make sure that you have enough processing power to run both the CPU-intensive processes and the Ceph daemons. (OpenStack Nova is -one such example of a CPU-intensive process.) We recommend that you run +one example of a CPU-intensive process.) We recommend that you run non-Ceph CPU-intensive processes on separate hosts (that is, on hosts that are -not your monitor and manager nodes) in order to avoid resource contention. +not your Monitor and Manager nodes) in order to avoid resource contention. +If your cluster deploys the Ceph Object Gateway, RGW daemons may co-reside +with your Mon and Manager services if the nodes have sufficient resources. RAM === -Generally, more RAM is better. Monitor / manager nodes for a modest cluster +Generally, more RAM is better. Monitor / Manager nodes for a modest cluster might do fine with 64GB; for a larger cluster with hundreds of OSDs 128GB -is a reasonable target. There is a memory target for BlueStore OSDs that +is advised. + +.. tip:: When we speak of RAM and storage requirements, we often describe + the needs of a single daemon of a given type. A given server as + a whole will thus need at least the sum of the needs of the + daemons that it hosts as well as resources for logs and other operating + system components. Keep in mind that a server's need for RAM + and storage will be greater at startup and when components + fail or are added and the cluster rebalances. In other words, + allow headroom past what you might see used during a calm period + on a small initial cluster footprint. + +There is an :confval:`osd_memory_target` setting for BlueStore OSDs that defaults to 4GB. Factor in a prudent margin for the operating system and administrative tasks (like monitoring and metrics) as well as increased -consumption during recovery: provisioning ~8GB per BlueStore OSD -is advised. +consumption during recovery: provisioning ~8GB *per BlueStore OSD* is thus +advised. Monitors and managers (ceph-mon and ceph-mgr) --------------------------------------------- -Monitor and manager daemon memory usage generally scales with the size of the +Monitor and manager daemon memory usage scales with the size of the cluster. Note that at boot-time and during topology changes and recovery these daemons will need more RAM than they do during steady-state operation, so plan for peak usage. For very small clusters, 32 GB suffices. For clusters of up to, @@ -75,8 +92,8 @@ tuning the following settings: Metadata servers (ceph-mds) --------------------------- -The metadata daemon memory utilization depends on how much memory its cache is -configured to consume. We recommend 1 GB as a minimum for most systems.
See +CephFS metadata daemon memory utilization depends on the configured size of +its cache. We recommend 1 GB as a minimum for most systems. See :confval:`mds_cache_memory_limit`. @@ -88,23 +105,24 @@ operating system's page cache. In Bluestore you can adjust the amount of memory that the OSD attempts to consume by changing the :confval:`osd_memory_target` configuration option. -- Setting the :confval:`osd_memory_target` below 2GB is typically not - recommended (Ceph may fail to keep the memory consumption under 2GB and - this may cause extremely slow performance). +- Setting the :confval:`osd_memory_target` below 2GB is not + recommended. Ceph may fail to keep the memory consumption under 2GB and + extremely slow performance is likely. - Setting the memory target between 2GB and 4GB typically works but may result - in degraded performance: metadata may be read from disk during IO unless the - active data set is relatively small. + in degraded performance: metadata may need to be read from disk during IO + unless the active data set is relatively small. -- 4GB is the current default :confval:`osd_memory_target` size. This default - was chosen for typical use cases, and is intended to balance memory - requirements and OSD performance. +- 4GB is the current default value for :confval:`osd_memory_target`. This default + was chosen for typical use cases, and is intended to balance RAM cost and + OSD performance. - Setting the :confval:`osd_memory_target` higher than 4GB can improve performance when there many (small) objects or when large (256GB/OSD - or more) data sets are processed. + or more) data sets are processed. This is especially true with fast + NVMe OSDs. -.. important:: OSD memory autotuning is "best effort". Although the OSD may +.. important:: OSD memory management is "best effort". Although the OSD may unmap memory to allow the kernel to reclaim it, there is no guarantee that the kernel will actually reclaim freed memory within a specific time frame. This applies especially in older versions of Ceph, where transparent @@ -113,14 +131,19 @@ configuration option. pages at the application level to avoid this, but that does not guarantee that the kernel will immediately reclaim unmapped memory. The OSD may still at times exceed its memory target. We recommend budgeting - approximately 20% extra memory on your system to prevent OSDs from going OOM + at least 20% extra memory on your system to prevent OSDs from going OOM (**O**\ut **O**\f **M**\emory) during temporary spikes or due to delay in the kernel reclaiming freed pages. That 20% value might be more or less than needed, depending on the exact configuration of the system. -When using the legacy FileStore back end, the page cache is used for caching -data, so no tuning is normally needed. When using the legacy FileStore backend, -the OSD memory consumption is related to the number of PGs per daemon in the +.. tip:: Configuring the operating system with swap to provide additional + virtual memory for daemons is not advised for modern systems. Doing so + may result in lower performance, and your Ceph cluster may well be + happier with a daemon that crashes vs one that slows to a crawl. + +When using the legacy FileStore back end, the OS page cache was used for caching +data, so tuning was not normally needed. When using the legacy FileStore backend, +the OSD memory consumption was related to the number of PGs per daemon in the system. @@ -130,13 +153,34 @@ Data Storage Plan your data storage configuration carefully.
There are significant cost and performance tradeoffs to consider when planning for data storage. Simultaneous OS operations and simultaneous requests from multiple daemons for read and -write operations against a single drive can slow performance. +write operations against a single drive can impact performance. + +OSDs require substantial storage drive space for RADOS data. We recommend a +minimum drive size of 1 terabyte. OSD drives much smaller than one terabyte +use a significant fraction of their capacity for metadata, and drives smaller +than 100 gigabytes will not be effective at all. + +It is *strongly* suggested that (enterprise-class) SSDs are provisioned for, at a +minimum, Ceph Monitor and Ceph Manager hosts, as well as CephFS Metadata Server +metadata pools and Ceph Object Gateway (RGW) index pools, even if HDDs are to +be provisioned for bulk OSD data. + +To get the best performance out of Ceph, provision the following on separate +drives: + +* The operating systems +* OSD data +* BlueStore WAL+DB + +For more +information on how to effectively use a mix of fast drives and slow drives in +your Ceph cluster, see the `block and block.db`_ section of the Bluestore +Configuration Reference. Hard Disk Drives ---------------- -OSDs should have plenty of storage drive space for object data. We recommend a -minimum disk drive size of 1 terabyte. Consider the cost-per-gigabyte advantage +Consider carefully the cost-per-gigabyte advantage of larger disks. We recommend dividing the price of the disk drive by the number of gigabytes to arrive at a cost per gigabyte, because larger drives may have a significant impact on the cost-per-gigabyte. For example, a 1 terabyte @@ -146,11 +190,10 @@ per gigabyte (i.e., $150 / 3072 = 0.0488). In the foregoing example, using the 1 terabyte disks would generally increase the cost per gigabyte by 40%--rendering your cluster substantially less cost efficient. -.. tip:: Running multiple OSDs on a single SAS / SATA drive - is **NOT** a good idea. NVMe drives, however, can achieve - improved performance by being split into two or more OSDs. +.. tip:: Hosting multiple OSDs on a single SAS / SATA HDD + is **NOT** a good idea. -.. tip:: Running an OSD and a monitor or a metadata server on a single +.. tip:: Hosting an OSD with monitor, manager, or MDS data on a single drive is also **NOT** a good idea. .. tip:: With spinning disks, the SATA and SAS interface increasingly @@ -162,35 +205,36 @@ Storage drives are subject to limitations on seek time, access time, read and write times, as well as total throughput. These physical limitations affect overall system performance--especially during recovery. We recommend using a dedicated (ideally mirrored) drive for the operating system and software, and -one drive for each Ceph OSD Daemon you run on the host (modulo NVMe above). +one drive for each Ceph OSD Daemon you run on the host. Many "slow OSD" issues (when they are not attributable to hardware failure) arise from running an operating system and multiple OSDs on the same drive. +Also be aware that today's 22TB HDD uses the same SATA interface as a +3TB HDD from ten years ago: more than seven times the data to squeeze +through the same interface. For this reason, when using HDDs for +OSDs, drives larger than 8TB may be best suited for storage of large +files / objects that are not at all performance-sensitive.
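+
+As a rough back-of-the-envelope illustration (the ~200 MB/s sustained
+throughput figure below is only an assumption for the sake of example, not a
+measured value), the time needed to read or rewrite an entire large HDD can be
+estimated as follows:
+
+.. code-block:: console
+
+   # capacity in TB and sustained throughput in MB/s are illustrative assumptions
+   $ awk 'BEGIN { tb=22; mbs=200; printf "%.1f hours\n", tb * 1e6 / mbs / 3600 }'
+   30.6 hours
+
+At that rate a 22TB drive takes more than a full day to backfill even under
+ideal conditions, which is another reason to reserve very large HDDs for
+capacity-oriented, less performance-sensitive data.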
-It is technically possible to run multiple Ceph OSD Daemons per SAS / SATA -drive, but this will lead to resource contention and diminish overall -throughput. - -To get the best performance out of Ceph, run the following on separate drives: -(1) operating systems, (2) OSD data, and (3) BlueStore db. For more -information on how to effectively use a mix of fast drives and slow drives in -your Ceph cluster, see the `block and block.db`_ section of the Bluestore -Configuration Reference. Solid State Drives ------------------ -Ceph performance can be improved by using solid-state drives (SSDs). This -reduces random access time and reduces latency while accelerating throughput. +Ceph performance is much improved when using solid-state drives (SSDs). This +reduces random access time and reduces latency while increasing throughput. -SSDs cost more per gigabyte than do hard disk drives, but SSDs often offer -access times that are, at a minimum, 100 times faster than hard disk drives. +SSDs cost more per gigabyte than do HDDs but SSDs often offer +access times that are, at a minimum, 100 times faster than HDDs. SSDs avoid hotspot issues and bottleneck issues within busy clusters, and -they may offer better economics when TCO is evaluated holistically. - -SSDs do not have moving mechanical parts, so they are not necessarily subject -to the same types of limitations as hard disk drives. SSDs do have significant +they may offer better economics when TCO is evaluated holistically. Notably, +the amortized drive cost for a given number of IOPS is much lower with SSDs +than with HDDs. SSDs do not suffer rotational or seek latency and in addition +to improved client performance, they substantially improve the speed and +client impact of cluster changes including rebalancing when OSDs or Monitors +are added, removed, or fail. + +SSDs do not have moving mechanical parts, so they are not subject +to many of the limitations of HDDs. SSDs do have significant limitations though. When evaluating SSDs, it is important to consider the -performance of sequential reads and writes. +performance of sequential and random reads and writes. .. important:: We recommend exploring the use of SSDs to improve performance. However, before making a significant investment in SSDs, we **strongly @@ -198,16 +242,36 @@ performance of sequential reads and writes. SSD in a test configuration in order to gauge performance. Relatively inexpensive SSDs may appeal to your sense of economy. Use caution. -Acceptable IOPS are not the only factor to consider when selecting an SSD for -use with Ceph. - -SSDs have historically been cost prohibitive for object storage, but emerging -QLC drives are closing the gap, offering greater density with lower power -consumption and less power spent on cooling. HDD OSDs may see a significant -performance improvement by offloading WAL+DB onto an SSD. - -To get a better sense of the factors that determine the cost of storage, you -might use the `Storage Networking Industry Association's Total Cost of +Acceptable IOPS are not the only factor to consider when selecting SSDs for +use with Ceph. Bargain SSDs are often a false economy: they may experience +"cliffing", which means that after an initial burst, sustained performance +once a limited cache is filled declines considerably. Consider also durability: +a drive rated for 0.3 Drive Writes Per Day (DWPD or equivalent) may be fine for +OSDs dedicated to certain types of sequentially-written read-mostly data, but +are not a good choice for Ceph Monitor duty. 
Enterprise-class SSDs are best +for Ceph: they almost always feature power loss protection (PLP) and do +not suffer the dramatic cliffing that client (desktop) models may experience. + +When using a single (or mirrored pair) SSD for both operating system boot +and Ceph Monitor / Manager purposes, a minimum capacity of 256GB is advised +and at least 480GB is recommended. A drive model rated at 1+ DWPD (or the +equivalent in TBW (TeraBytes Written)) is suggested. However, for a given write +workload, a larger drive than technically required will provide more endurance +because it effectively has greater overprovisioning. We stress that +enterprise-class drives are best for production use, as they feature power +loss protection and increased durability compared to client (desktop) SKUs +that are intended for much lighter and intermittent duty cycles. + +SSDs were historically cost prohibitive for object storage, but +QLC SSDs are closing the gap, offering greater density with lower power +consumption and less power spent on cooling. Also, HDD OSDs may see a +significant write latency improvement by offloading WAL+DB onto an SSD. +Many Ceph OSD deployments do not require an SSD with greater endurance than +1 DWPD (aka "read-optimized"). "Mixed-use" SSDs in the 3 DWPD class are +often overkill for this purpose and cost significantly more. + +To get a better sense of the factors that determine the total cost of storage, +you might use the `Storage Networking Industry Association's Total Cost of +Ownership calculator`_ Partition Alignment ~~~~~~~~~~~~~~~~~~~ @@ -222,11 +286,11 @@ alignment and example commands that show how to align partitions properly, see CephFS Metadata Segregation ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -One way that Ceph accelerates CephFS file system performance is by segregating +One way that Ceph accelerates CephFS file system performance is by separating the storage of CephFS metadata from the storage of the CephFS file contents. Ceph provides a default ``metadata`` pool for CephFS metadata. You will never -have to create a pool for CephFS metadata, but you can create a CRUSH map -hierarchy for your CephFS metadata pool that points only to SSD storage media. +have to manually create a pool for CephFS metadata, but you can create a CRUSH map +hierarchy for your CephFS metadata pool that includes only SSD storage media. See :ref:`CRUSH Device Class` for details. @@ -237,8 +301,20 @@ Disk controllers (HBAs) can have a significant impact on write throughput. Carefully consider your selection of HBAs to ensure that they do not create a performance bottleneck. Notably, RAID-mode (IR) HBAs may exhibit higher latency than simpler "JBOD" (IT) mode HBAs. The RAID SoC, write cache, and battery -backup can substantially increase hardware and maintenance costs. Some RAID -HBAs can be configured with an IT-mode "personality". +backup can substantially increase hardware and maintenance costs. Many RAID +HBAs can be configured with an IT-mode "personality" or "JBOD mode" for +streamlined operation. + +You do not need an RoC (RAID-capable) HBA. ZFS or Linux MD software mirroring +serves well for boot volume durability. When using SAS or SATA data drives, +forgoing HBA RAID capabilities can reduce the gap between HDD and SSD +media cost. Moreover, when using NVMe SSDs, you do not need *any* HBA. This +additionally reduces the HDD vs SSD cost gap when the system as a whole is considered.
The initial cost of a fancy RAID HBA plus onboard cache plus +battery backup (BBU or supercapacitor) can easily exceed 1000 US +dollars even after discounts - a sum that goes a long way toward SSD cost parity. +An HBA-free system may also cost hundreds of US dollars less every year if one +purchases an annual maintenance contract or extended warranty. .. tip:: The `Ceph blog`_ is often an excellent source of information on Ceph performance issues. See `Ceph Write Throughput 1`_ and `Ceph Write @@ -248,10 +324,10 @@ HBAs can be configured with an IT-mode "personality". Benchmarking ------------ -BlueStore opens block devices in O_DIRECT and uses fsync frequently to ensure -that data is safely persisted to media. You can evaluate a drive's low-level -write performance using ``fio``. For example, 4kB random write performance is -measured as follows: +BlueStore opens storage devices with ``O_DIRECT`` and issues ``fsync()`` +frequently to ensure that data is safely persisted to media. You can evaluate a +drive's low-level write performance using ``fio``. For example, 4kB random write +performance is measured as follows: .. code-block:: console @@ -261,6 +337,7 @@ Write Caches ------------ Enterprise SSDs and HDDs normally include power loss protection features which +ensure data durability when power is lost while operating, and use multi-level caches to speed up direct or synchronous writes. These devices can be toggled between two caching modes -- a volatile cache flushed to persistent media with fsync, or a non-volatile cache written synchronously. @@ -269,9 +346,9 @@ These two modes are selected by either "enabling" or "disabling" the write (volatile) cache. When the volatile cache is enabled, Linux uses a device in "write back" mode, and when disabled, it uses "write through". -The default configuration (normally caching enabled) may not be optimal, and +The default configuration (usually: caching is enabled) may not be optimal, and OSD performance may be dramatically increased in terms of increased IOPS and -decreased commit_latency by disabling the write cache. +decreased commit latency by disabling this write cache. Users are therefore encouraged to benchmark their devices with ``fio`` as described earlier and persist the optimal cache configuration for their @@ -319,11 +396,11 @@ The write cache can be disabled with those same tools: === START OF ENABLE/DISABLE COMMANDS SECTION === Write cache disabled -Normally, disabling the cache using ``hdparm``, ``sdparm``, or ``smartctl`` +In most cases, disabling this cache using ``hdparm``, ``sdparm``, or ``smartctl`` results in the cache_type changing automatically to "write through". If this is -not the case, you can try setting it directly as follows. (Users should note +not the case, you can try setting it directly as follows. (Users should ensure that setting cache_type also correctly persists the caching mode of the device -until the next reboot): +until the next reboot as some drives require this to be repeated at every boot): .. code-block:: console @@ -367,13 +444,13 @@ until the next reboot): Additional Considerations ------------------------- -You typically will run multiple OSDs per host, but you should ensure that the -aggregate throughput of your OSD drives doesn't exceed the network bandwidth -required to service a client's need to read or write data. You should also -consider what percentage of the overall data the cluster stores on each host.
If -the percentage on a particular host is large and the host fails, it can lead to -problems such as exceeding the ``full ratio``, which causes Ceph to halt -operations as a safety precaution that prevents data loss. +Ceph operators typically provision multiple OSDs per host, but you should +ensure that the aggregate throughput of your OSD drives doesn't exceed the +network bandwidth required to service a client's read and write operations. +You should also consider each host's percentage of the cluster's overall capacity. If +the percentage located on a particular host is large and the host fails, it +can lead to problems such as recovery causing OSDs to exceed the ``full ratio``, +which in turn causes Ceph to halt operations to prevent data loss. When you run multiple OSDs per host, you also need to ensure that the kernel is up to date. See `OS Recommendations`_ for notes on ``glibc`` and @@ -384,7 +461,11 @@ multiple OSDs per host. Networks ======== -Provision at least 10 Gb/s networking in your racks. +Provision at least 10 Gb/s networking in your datacenter, both among Ceph +hosts and between clients and your Ceph cluster. Network link active/active +bonding across separate network switches is strongly recommended both for +increased throughput and for tolerance of network failures and maintenance. +Take care that your bonding hash policy distributes traffic across links. Speed ----- @@ -392,13 +473,20 @@ Speed It takes three hours to replicate 1 TB of data across a 1 Gb/s network and it takes thirty hours to replicate 10 TB across a 1 Gb/s network. But it takes only twenty minutes to replicate 1 TB across a 10 Gb/s network, and it takes -only one hour to replicate 10 TB across a 10 Gb/s network. +only one hour to replicate 10 TB across a 10 Gb/s network. + +Note that a 40 Gb/s network link is effectively four 10 Gb/s channels in +parallel, and that a 100Gb/s network link is effectively four 25 Gb/s channels +in parallel. Thus, and perhaps somewhat counterintuitively, an individual +packet on a 25 Gb/s network has slightly lower latency compared to a 40 Gb/s +network. + Cost ---- The larger the Ceph cluster, the more common OSD failures will be. -The faster that a placement group (PG) can recover from a ``degraded`` state to +The faster that a placement group (PG) can recover from a degraded state to an ``active + clean`` state, the better. Notably, fast recovery minimizes the likelihood of multiple, overlapping failures that can cause data to become temporarily unavailable or even lost. Of course, when provisioning your @@ -410,10 +498,10 @@ switches. The added expense of this hardware may be offset by the operational cost savings on network setup and maintenance. When using VLANs to handle VM traffic between the cluster and compute stacks (e.g., OpenStack, CloudStack, etc.), there is additional value in using 10 Gb/s Ethernet or better; 40 Gb/s or -25/50/100 Gb/s networking as of 2022 is common for production clusters. +increasingly 25/50/100 Gb/s networking as of 2022 is common for production clusters. -Top-of-rack (TOR) switches also need fast and redundant uplinks to spind -spine switches / routers, often at least 40 Gb/s. +Top-of-rack (TOR) switches also need fast and redundant uplinks to +core / spine network switches or routers, often at least 40 Gb/s.
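+
+The replication-time figures in the Speed section above can be sanity-checked
+with a quick calculation. The sketch below computes the theoretical best-case
+transfer time at line rate; real transfers take longer because of protocol
+overhead, replication traffic, and competing workloads:
+
+.. code-block:: console
+
+   # data size in TB and link speed in Gb/s are example values
+   $ awk 'BEGIN { tb=1; gbps=1; printf "%.1f hours\n", tb * 8e6 / (gbps * 1e3) / 3600 }'
+   2.2 hours
+
+Even at a theoretical 1 Gb/s line rate, moving 1 TB takes over two hours, and
+recovering tens of terabytes over such a link stretches past a full day, which
+is why faster networking pays for itself during recovery.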
Baseboard Management Controller (BMC) ===================================== @@ -425,78 +513,103 @@ Administration and deployment tools may also use BMCs extensively, especially via IPMI or Redfish, so consider the cost/benefit tradeoff of an out-of-band network for security and administration. Hypervisor SSH access, VM image uploads, OS image installs, management sockets, etc. can impose significant loads on a network. -Running three networks may seem like overkill, but each traffic path represents +Running multiple networks may seem like overkill, but each traffic path represents a potential capacity, throughput and/or performance bottleneck that you should carefully consider before deploying a large scale data cluster. + +Additionally, BMCs as of 2023 rarely sport network connections faster than 1 Gb/s, +so dedicated and inexpensive 1 Gb/s switches for BMC administrative traffic +may reduce costs by wasting fewer expensive ports on faster host switches. Failure Domains =============== -A failure domain is any failure that prevents access to one or more OSDs. That -could be a stopped daemon on a host; a disk failure, an OS crash, a -malfunctioning NIC, a failed power supply, a network outage, a power outage, -and so forth. When planning out your hardware needs, you must balance the -temptation to reduce costs by placing too many responsibilities into too few -failure domains, and the added costs of isolating every potential failure -domain. +A failure domain can be thought of as any component loss that prevents access to +one or more OSDs or other Ceph daemons. These could be a stopped daemon on a host; +a storage drive failure, an OS crash, a malfunctioning NIC, a failed power supply, +a network outage, a power outage, and so forth. When planning your hardware +deployment, you must balance the risk of reducing costs by placing too many +responsibilities into too few failure domains against the added costs of +isolating every potential failure domain. Minimum Hardware Recommendations ================================ Ceph can run on inexpensive commodity hardware. Small production clusters -and development clusters can run successfully with modest hardware. +and development clusters can run successfully with modest hardware. As +we noted above: when we speak of CPU *cores*, we mean *threads* when +hyperthreading (HT) is enabled. Each modern physical x64 CPU core typically +provides two logical CPU threads; other CPU architectures may vary. + +Keep in mind that many factors influence resource choices. The +minimum resources that suffice for one purpose will not necessarily suffice for +another. A sandbox cluster with one OSD built on a laptop with VirtualBox or on +a trio of Raspberry Pis will get by with fewer resources than a production +deployment with a thousand OSDs serving five thousand RBD clients. The +classic Fisher Price PXL 2000 captures video, as does an IMAX or RED camera. +One would not expect the former to do the job of the latter. We cannot +stress enough the criticality of using enterprise-quality storage +media for production workloads. + +Additional insights into resource planning for production clusters are +found above and elsewhere within this documentation.
+--------------+----------------+-----------------------------------------+ -| Process | Criteria | Minimum Recommended | +| Process | Criteria | Bare Minimum and Recommended | +==============+================+=========================================+ -| ``ceph-osd`` | Processor | - 1 core minimum | -| | | - 1 core per 200-500 MB/s | +| ``ceph-osd`` | Processor | - 1 core minimum, 2 recommended | +| | | - 1 core per 200-500 MB/s throughput | | | | - 1 core per 1000-3000 IOPS | | | | | | | | * Results are before replication. | -| | | * Results may vary with different | -| | | CPU models and Ceph features. | +| | | * Results may vary across CPU and drive | +| | | models and Ceph configuration: | | | | (erasure coding, compression, etc) | | | | * ARM processors specifically may | -| | | require additional cores. | +| | | require more cores for performance. | +| | | * SSD OSDs, especially NVMe, will | +| | | benefit from additional cores per OSD.| | | | * Actual performance depends on many | | | | factors including drives, net, and | | | | client throughput and latency. | | | | Benchmarking is highly recommended. | | +----------------+-----------------------------------------+ | | RAM | - 4GB+ per daemon (more is better) | -| | | - 2-4GB often functions (may be slow) | -| | | - Less than 2GB not recommended | +| | | - 2-4GB may function but may be slow | +| | | - Less than 2GB is not recommended | | +----------------+-----------------------------------------+ -| | Volume Storage | 1x storage drive per daemon | +| | Storage Drives | 1x storage drive per OSD | | +----------------+-----------------------------------------+ -| | DB/WAL | 1x SSD partition per daemon (optional) | +| | DB/WAL | 1x SSD partion per HDD OSD | +| | (optional) | 4-5x HDD OSDs per DB/WAL SATA SSD | +| | | <= 10 HDD OSDss per DB/WAL NVMe SSD | | +----------------+-----------------------------------------+ -| | Network | 1x 1GbE+ NICs (10GbE+ recommended) | +| | Network | 1x 1Gb/s (bonded 10+ Gb/s recommended) | +--------------+----------------+-----------------------------------------+ | ``ceph-mon`` | Processor | - 2 cores minimum | | +----------------+-----------------------------------------+ -| | RAM | 2-4GB+ per daemon | +| | RAM | 5GB+ per daemon (large / production | +| | | clusters need more) | | +----------------+-----------------------------------------+ -| | Disk Space | 60 GB per daemon | +| | Storage | 100 GB per daemon, SSD is recommended | | +----------------+-----------------------------------------+ -| | Network | 1x 1GbE+ NICs | +| | Network | 1x 1Gb/s (10+ Gb/s recommended) | +--------------+----------------+-----------------------------------------+ | ``ceph-mds`` | Processor | - 2 cores minimum | | +----------------+-----------------------------------------+ -| | RAM | 2GB+ per daemon | +| | RAM | 2GB+ per daemon (more for production) | | +----------------+-----------------------------------------+ -| | Disk Space | 1 MB per daemon | +| | Disk Space | 1 GB per daemon | | +----------------+-----------------------------------------+ -| | Network | 1x 1GbE+ NICs | +| | Network | 1x 1Gb/s (10+ Gb/s recommended) | +--------------+----------------+-----------------------------------------+ -.. tip:: If you are running an OSD with a single disk, create a - partition for your volume storage that is separate from the partition - containing the OS. Generally, we recommend separate disks for the - OS and the volume storage. +.. 
tip:: If you are running an OSD node with a single storage drive, create a + partition for your OSD that is separate from the partition + containing the OS. We recommend separate drives for the + OS and for OSD storage. diff --git a/ceph/doc/start/os-recommendations.rst b/ceph/doc/start/os-recommendations.rst index 98cef7820..81906569e 100644 --- a/ceph/doc/start/os-recommendations.rst +++ b/ceph/doc/start/os-recommendations.rst @@ -35,20 +35,38 @@ Linux Kernel Platforms ========= -The charts below show how Ceph's requirements map onto various Linux -platforms. Generally speaking, there is very little dependence on -specific distributions outside of the kernel and system initialization -package (i.e., sysvinit, systemd). - -+--------------+--------+------------------------+--------------------------------+-------------------+-----------------+ -| Release Name | Tag | CentOS | Ubuntu | OpenSUSE :sup:`C` | Debian :sup:`C` | -+==============+========+========================+================================+===================+=================+ -| Quincy | 17.2.z | 8 :sup:`A` | 20.04 :sup:`A` | 15.3 | 11 | -+--------------+--------+------------------------+--------------------------------+-------------------+-----------------+ -| Pacific | 16.2.z | 8 :sup:`A` | 18.04 :sup:`C`, 20.04 :sup:`A` | 15.2 | 10, 11 | -+--------------+--------+------------------------+--------------------------------+-------------------+-----------------+ -| Octopus | 15.2.z | 7 :sup:`B` 8 :sup:`A` | 18.04 :sup:`C`, 20.04 :sup:`A` | 15.2 | 10 | -+--------------+--------+------------------------+--------------------------------+-------------------+-----------------+ +The chart below shows which Linux platforms Ceph provides packages for, and +which platforms Ceph has been tested on. + +Ceph does not require a specific Linux distribution. Ceph can run on any +distribution that includes a supported kernel and supported system startup +framework, for example ``sysvinit`` or ``systemd``. Ceph is sometimes ported to +non-Linux systems but these are not supported by the core Ceph effort. 
+ + ++---------------+---------------+-----------------+------------------+------------------+ +| | Reef (18.2.z) | Quincy (17.2.z) | Pacific (16.2.z) | Octopus (15.2.z) | ++===============+===============+=================+==================+==================+ +| Centos 7 | | | A | B | ++---------------+---------------+-----------------+------------------+------------------+ +| Centos 8 | A | A | A | A | ++---------------+---------------+-----------------+------------------+------------------+ +| Centos 9 | A | | | | ++---------------+---------------+-----------------+------------------+------------------+ +| Debian 10 | C | | C | C | ++---------------+---------------+-----------------+------------------+------------------+ +| Debian 11 | C | C | C | | ++---------------+---------------+-----------------+------------------+------------------+ +| OpenSUSE 15.2 | C | | C | C | ++---------------+---------------+-----------------+------------------+------------------+ +| OpenSUSE 15.3 | C | C | | | ++---------------+---------------+-----------------+------------------+------------------+ +| Ubuntu 18.04 | | | C | C | ++---------------+---------------+-----------------+------------------+------------------+ +| Ubuntu 20.04 | A | A | A | A | ++---------------+---------------+-----------------+------------------+------------------+ +| Ubuntu 22.04 | A | | | | ++---------------+---------------+-----------------+------------------+------------------+ - **A**: Ceph provides packages and has done comprehensive tests on the software in them. - **B**: Ceph provides packages and has done basic tests on the software in them. diff --git a/ceph/install-deps.sh b/ceph/install-deps.sh index e9dae008e..eb773c3eb 100755 --- a/ceph/install-deps.sh +++ b/ceph/install-deps.sh @@ -141,19 +141,51 @@ function install_pkg_on_ubuntu { fi } +boost_ver=1.79 + +function clean_boost_on_ubuntu { + in_jenkins && echo "CI_DEBUG: Running clean_boost_on_ubuntu() in install-deps.sh" + # Find currently installed version. If there are multiple + # versions, they end up newline separated + local installed_ver=$(apt -qq list --installed ceph-libboost*-dev 2>/dev/null | + cut -d' ' -f2 | + cut -d'.' -f1,2 | + sort -u) + # If installed_ver contains whitespace, we can't really count on it, + # but otherwise, bail out if the version installed is the version + # we want. + if test -n "$installed_ver" && + echo -n "$installed_ver" | tr '[:space:]' ' ' | grep -v -q ' '; then + if echo "$installed_ver" | grep -q "^$boost_ver"; then + return + fi + fi + + # Historical packages + $SUDO rm -f /etc/apt/sources.list.d/ceph-libboost*.list + # Currently used + $SUDO rm -f /etc/apt/sources.list.d/libboost.list + # Refresh package list so things aren't in the available list. + $SUDO env DEBIAN_FRONTEND=noninteractive apt-get update -y || true + # Remove all ceph-libboost packages. We have an early return if + # the desired version is already (and the only) version installed, + # so no need to spare it. + if test -n "$installed_ver"; then + $SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y --fix-missing remove "ceph-libboost*" + fi +} + function install_boost_on_ubuntu { in_jenkins && echo "CI_DEBUG: Running install_boost_on_ubuntu() in install-deps.sh" - local ver=1.79 + # Once we get to this point, clean_boost_on_ubuntu() should ensure + # that there is no more than one installed version. local installed_ver=$(apt -qq list --installed ceph-libboost*-dev 2>/dev/null | grep -e 'libboost[0-9].[0-9]\+-dev' | cut -d' ' -f2 | cut -d'.' 
-f1,2) if test -n "$installed_ver"; then - if echo "$installed_ver" | grep -q "^$ver"; then + if echo "$installed_ver" | grep -q "^$boost_ver"; then return - else - $SUDO env DEBIAN_FRONTEND=noninteractive apt-get -y remove "ceph-libboost.*${installed_ver}.*" - $SUDO rm -f /etc/apt/sources.list.d/ceph-libboost${installed_ver}.list fi fi local codename=$1 @@ -164,22 +196,22 @@ function install_boost_on_ubuntu { $sha1 \ $codename \ check \ - ceph-libboost-atomic$ver-dev \ - ceph-libboost-chrono$ver-dev \ - ceph-libboost-container$ver-dev \ - ceph-libboost-context$ver-dev \ - ceph-libboost-coroutine$ver-dev \ - ceph-libboost-date-time$ver-dev \ - ceph-libboost-filesystem$ver-dev \ - ceph-libboost-iostreams$ver-dev \ - ceph-libboost-program-options$ver-dev \ - ceph-libboost-python$ver-dev \ - ceph-libboost-random$ver-dev \ - ceph-libboost-regex$ver-dev \ - ceph-libboost-system$ver-dev \ - ceph-libboost-test$ver-dev \ - ceph-libboost-thread$ver-dev \ - ceph-libboost-timer$ver-dev + ceph-libboost-atomic${boost_ver}-dev \ + ceph-libboost-chrono${boost_ver}-dev \ + ceph-libboost-container${boost_ver}-dev \ + ceph-libboost-context${boost_ver}-dev \ + ceph-libboost-coroutine${boost_ver}-dev \ + ceph-libboost-date-time${boost_ver}-dev \ + ceph-libboost-filesystem${boost_ver}-dev \ + ceph-libboost-iostreams${boost_ver}-dev \ + ceph-libboost-program-options${boost_ver}-dev \ + ceph-libboost-python${boost_ver}-dev \ + ceph-libboost-random${boost_ver}-dev \ + ceph-libboost-regex${boost_ver}-dev \ + ceph-libboost-system${boost_ver}-dev \ + ceph-libboost-test${boost_ver}-dev \ + ceph-libboost-thread${boost_ver}-dev \ + ceph-libboost-timer${boost_ver}-dev } function install_libzbd_on_ubuntu { @@ -330,6 +362,9 @@ else case "$ID" in debian|ubuntu|devuan|elementary|softiron) echo "Using apt-get to install dependencies" + # Put this before any other invocation of apt so it can clean + # up in a broken case. + clean_boost_on_ubuntu $SUDO apt-get install -y devscripts equivs $SUDO apt-get install -y dpkg-dev ensure_python3_sphinx_on_ubuntu diff --git a/ceph/make-dist b/ceph/make-dist index 22bf2c98c..721e9128c 100755 --- a/ceph/make-dist +++ b/ceph/make-dist @@ -132,7 +132,7 @@ build_dashboard_frontend() { $CURR_DIR/src/tools/setup-virtualenv.sh $TEMP_DIR $TEMP_DIR/bin/pip install nodeenv - $TEMP_DIR/bin/nodeenv --verbose -p --node=14.15.1 + $TEMP_DIR/bin/nodeenv --verbose -p --node=18.17.0 cd src/pybind/mgr/dashboard/frontend . 
$TEMP_DIR/bin/activate diff --git a/ceph/qa/cephfs/begin/1-ceph.yaml b/ceph/qa/cephfs/begin/1-ceph.yaml index 6765a266a..531c8e3e0 100644 --- a/ceph/qa/cephfs/begin/1-ceph.yaml +++ b/ceph/qa/cephfs/begin/1-ceph.yaml @@ -3,3 +3,4 @@ log-rotate: ceph-osd: 10G tasks: - ceph: + create_rbd_pool: false diff --git a/ceph/qa/cephfs/overrides/ignorelist_health.yaml b/ceph/qa/cephfs/overrides/ignorelist_health.yaml index 7f0d49eab..d8b819288 100644 --- a/ceph/qa/cephfs/overrides/ignorelist_health.yaml +++ b/ceph/qa/cephfs/overrides/ignorelist_health.yaml @@ -10,3 +10,4 @@ overrides: - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - \(FS_INLINE_DATA_DEPRECATED\) + - \(POOL_APP_NOT_ENABLED\) diff --git a/ceph/qa/distros/crimson-supported-all-distro/centos_8.yaml b/ceph/qa/distros/crimson-supported-all-distro/centos_8.yaml new file mode 120000 index 000000000..b7e6c9b4e --- /dev/null +++ b/ceph/qa/distros/crimson-supported-all-distro/centos_8.yaml @@ -0,0 +1 @@ +../all/centos_8.yaml \ No newline at end of file diff --git a/ceph/qa/distros/crimson-supported-all-distro/centos_latest.yaml b/ceph/qa/distros/crimson-supported-all-distro/centos_latest.yaml new file mode 120000 index 000000000..2e29883f3 --- /dev/null +++ b/ceph/qa/distros/crimson-supported-all-distro/centos_latest.yaml @@ -0,0 +1 @@ +../all/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-all-distro/centos_latest.yaml b/ceph/qa/distros/supported-all-distro/centos_latest.yaml new file mode 120000 index 000000000..2e29883f3 --- /dev/null +++ b/ceph/qa/distros/supported-all-distro/centos_latest.yaml @@ -0,0 +1 @@ +../all/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-all-distro/ubuntu_20.04.yaml b/ceph/qa/distros/supported-all-distro/ubuntu_20.04.yaml new file mode 120000 index 000000000..75d907e3b --- /dev/null +++ b/ceph/qa/distros/supported-all-distro/ubuntu_20.04.yaml @@ -0,0 +1 @@ +../all/ubuntu_20.04.yaml \ No newline at end of file diff --git a/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml b/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml index 75d907e3b..cfcd0d1a8 120000 --- a/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml +++ b/ceph/qa/distros/supported-all-distro/ubuntu_latest.yaml @@ -1 +1 @@ -../all/ubuntu_20.04.yaml \ No newline at end of file +../all/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/cachepool/none.yaml b/ceph/qa/rbd/conf/+ similarity index 100% rename from ceph/qa/suites/rbd/basic/cachepool/none.yaml rename to ceph/qa/rbd/conf/+ diff --git a/ceph/qa/rbd/conf/disable-pool-app.yaml b/ceph/qa/rbd/conf/disable-pool-app.yaml new file mode 100644 index 000000000..099532f57 --- /dev/null +++ b/ceph/qa/rbd/conf/disable-pool-app.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + mon warn on pool no app: false diff --git a/ceph/qa/suites/rbd/encryption/pool/ec-data-pool.yaml b/ceph/qa/rbd/data-pool/ec.yaml similarity index 100% rename from ceph/qa/suites/rbd/encryption/pool/ec-data-pool.yaml rename to ceph/qa/rbd/data-pool/ec.yaml diff --git a/ceph/qa/suites/rbd/cli/pool/none.yaml b/ceph/qa/rbd/data-pool/none.yaml similarity index 100% rename from ceph/qa/suites/rbd/cli/pool/none.yaml rename to ceph/qa/rbd/data-pool/none.yaml diff --git a/ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml b/ceph/qa/rbd/data-pool/replicated.yaml similarity index 100% rename from ceph/qa/suites/rbd/cli/pool/replicated-data-pool.yaml rename to ceph/qa/rbd/data-pool/replicated.yaml diff --git 
a/ceph/qa/rgw/ignore-pg-availability.yaml b/ceph/qa/rgw/ignore-pg-availability.yaml index 4b08b6b07..732e40306 100644 --- a/ceph/qa/rgw/ignore-pg-availability.yaml +++ b/ceph/qa/rgw/ignore-pg-availability.yaml @@ -1,9 +1,11 @@ # https://tracker.ceph.com/issues/45802 # https://tracker.ceph.com/issues/51282 # https://tracker.ceph.com/issues/61168 +# https://tracker.ceph.com/issues/62504 overrides: ceph: log-ignorelist: - \(PG_AVAILABILITY\) - \(PG_DEGRADED\) - \(POOL_APP_NOT_ENABLED\) + - not have an application enabled diff --git a/ceph/qa/standalone/ceph-helpers.sh b/ceph/qa/standalone/ceph-helpers.sh index 9f93be923..bf2c91bc0 100755 --- a/ceph/qa/standalone/ceph-helpers.sh +++ b/ceph/qa/standalone/ceph-helpers.sh @@ -1747,6 +1747,29 @@ function test_wait_for_peered() { ####################################################################### +## +# Wait until the cluster's health condition disappeared. +# $TIMEOUT default +# +# @param string to grep for in health detail +# @return 0 if the cluster health doesn't matches request, +# 1 otherwise if after $TIMEOUT seconds health condition remains. +# +function wait_for_health_gone() { + local grepstr=$1 + local -a delays=($(get_timeout_delays $TIMEOUT .1)) + local -i loop=0 + + while ceph health detail | grep "$grepstr" ; do + if (( $loop >= ${#delays[*]} )) ; then + ceph health detail + return 1 + fi + sleep ${delays[$loop]} + loop+=1 + done +} + ## # Wait until the cluster has health condition passed as arg # again for $TIMEOUT seconds. diff --git a/ceph/qa/standalone/mon-stretch/mon-stretch-fail-recovery.sh b/ceph/qa/standalone/mon-stretch/mon-stretch-fail-recovery.sh index 9ec5f5231..276d26aab 100755 --- a/ceph/qa/standalone/mon-stretch/mon-stretch-fail-recovery.sh +++ b/ceph/qa/standalone/mon-stretch/mon-stretch-fail-recovery.sh @@ -144,6 +144,5 @@ EOF sleep 3 teardown $dir || return 1 - } main mon-stretch-fail-recovery "$@" \ No newline at end of file diff --git a/ceph/qa/standalone/mon-stretch/mon-stretch-uneven-crush-weights.sh b/ceph/qa/standalone/mon-stretch/mon-stretch-uneven-crush-weights.sh new file mode 100755 index 000000000..7e13f4076 --- /dev/null +++ b/ceph/qa/standalone/mon-stretch/mon-stretch-uneven-crush-weights.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh +function run() { + local dir=$1 + shift + + export CEPH_MON_A="127.0.0.1:7139" # git grep '\<7139\>' : there must be only one + export CEPH_MON_B="127.0.0.1:7141" # git grep '\<7141\>' : there must be only one + export CEPH_MON_C="127.0.0.1:7142" # git grep '\<7142\>' : there must be only one + export CEPH_MON_D="127.0.0.1:7143" # git grep '\<7143\>' : there must be only one + export CEPH_MON_E="127.0.0.1:7144" # git grep '\<7144\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + + export BASE_CEPH_ARGS=$CEPH_ARGS + CEPH_ARGS+="--mon-host=$CEPH_MON_A" + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} +TEST_stretched_cluster_uneven_weight() { + local dir=$1 + local OSDS=4 + local weight=0.09000 + setup $dir || return 1 + + run_mon $dir a --public-addr $CEPH_MON_A || return 1 + wait_for_quorum 300 1 || return 1 + + run_mon $dir b --public-addr $CEPH_MON_B || return 1 + CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B" + wait_for_quorum 300 2 || return 1 + + run_mon $dir c --public-addr $CEPH_MON_C || return 1 + 
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C" + wait_for_quorum 300 3 || return 1 + + run_mon $dir d --public-addr $CEPH_MON_D || return 1 + CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D" + wait_for_quorum 300 4 || return 1 + + run_mon $dir e --public-addr $CEPH_MON_E || return 1 + CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D,$CEPH_MON_E" + wait_for_quorum 300 5 || return 1 + + ceph mon set election_strategy connectivity + ceph mon add disallowed_leader e + + run_mgr $dir x || return 1 + run_mgr $dir y || return 1 + run_mgr $dir z || return 1 + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + for zone in iris pze + do + ceph osd crush add-bucket $zone zone + ceph osd crush move $zone root=default + done + + ceph osd crush add-bucket node-2 host + ceph osd crush add-bucket node-3 host + ceph osd crush add-bucket node-4 host + ceph osd crush add-bucket node-5 host + + ceph osd crush move node-2 zone=iris + ceph osd crush move node-3 zone=iris + ceph osd crush move node-4 zone=pze + ceph osd crush move node-5 zone=pze + + ceph osd crush move osd.0 host=node-2 + ceph osd crush move osd.1 host=node-3 + ceph osd crush move osd.2 host=node-4 + ceph osd crush move osd.3 host=node-5 + + ceph mon set_location a zone=iris host=node-2 + ceph mon set_location b zone=iris host=node-3 + ceph mon set_location c zone=pze host=node-4 + ceph mon set_location d zone=pze host=node-5 + + hostname=$(hostname -s) + ceph osd crush remove $hostname || return 1 + ceph osd getcrushmap > crushmap || return 1 + crushtool --decompile crushmap > crushmap.txt || return 1 + sed 's/^# end crush map$//' crushmap.txt > crushmap_modified.txt || return 1 + cat >> crushmap_modified.txt << EOF +rule stretch_rule { + id 1 + type replicated + min_size 1 + max_size 10 + step take iris + step chooseleaf firstn 2 type host + step emit + step take pze + step chooseleaf firstn 2 type host + step emit +} +# end crush map +EOF + + crushtool --compile crushmap_modified.txt -o crushmap.bin || return 1 + ceph osd setcrushmap -i crushmap.bin || return 1 + local stretched_poolname=stretched_rbdpool + ceph osd pool create $stretched_poolname 32 32 stretch_rule || return 1 + ceph osd pool set $stretched_poolname size 4 || return 1 + + ceph mon set_location e zone=arbiter host=node-1 || return 1 + ceph mon enable_stretch_mode e stretch_rule zone || return 1 # Enter strech mode + + # reweight to a more round decimal. 
+ ceph osd crush reweight osd.0 $weight + ceph osd crush reweight osd.1 $weight + ceph osd crush reweight osd.2 $weight + ceph osd crush reweight osd.3 $weight + + # Firstly, we test for stretch mode buckets != 2 + ceph osd crush add-bucket sham zone || return 1 + ceph osd crush move sham root=default || return 1 + wait_for_health "INCORRECT_NUM_BUCKETS_STRETCH_MODE" || return 1 + + ceph osd crush rm sham # clear the health warn + wait_for_health_gone "INCORRECT_NUM_BUCKETS_STRETCH_MODE" || return 1 + + # Next, we test for uneven weights across buckets + + ceph osd crush reweight osd.0 0.07000 + + wait_for_health "UNEVEN_WEIGHTS_STRETCH_MODE" || return 1 + + ceph osd crush reweight osd.0 $weight # clear the health warn + + wait_for_health_gone "UNEVEN_WEIGHTS_STRETCH_MODE" || return 1 + + teardown $dir || return 1 +} +main mon-stretched-cluster-uneven-weight "$@" \ No newline at end of file diff --git a/ceph/qa/standalone/mon/mon-last-epoch-clean.sh b/ceph/qa/standalone/mon/mon-last-epoch-clean.sh index 172642e86..82243103e 100755 --- a/ceph/qa/standalone/mon/mon-last-epoch-clean.sh +++ b/ceph/qa/standalone/mon/mon-last-epoch-clean.sh @@ -173,7 +173,7 @@ function TEST_mon_last_clean_epoch() { local dir=$1 run_mon $dir a || return 1 - run_mgr $dir x || return 1 + run_mgr $dir x --mon-warn-on-pool-no-app=false || return 1 run_osd $dir 0 || return 1 run_osd $dir 1 || return 1 run_osd $dir 2 || return 1 diff --git a/ceph/qa/standalone/osd/divergent-priors.sh b/ceph/qa/standalone/osd/divergent-priors.sh index dec0e7ad4..40d72544d 100755 --- a/ceph/qa/standalone/osd/divergent-priors.sh +++ b/ceph/qa/standalone/osd/divergent-priors.sh @@ -650,13 +650,28 @@ function TEST_divergent_3() { # reproduce https://tracker.ceph.com/issues/41816 ceph osd pool set $poolname pg_autoscale_mode on - flush_pg_stats || return 1 - wait_for_clean || return 1 + divergent=-1 + start_time=$(date +%s) + max_duration=300 + + while [ "$divergent" -le -1 ] + do + flush_pg_stats || return 1 + wait_for_clean || return 1 + + # determine primary + divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')" + echo "primary and soon to be divergent is $divergent" + ceph pg dump pgs + + current_time=$(date +%s) + elapsed_time=$(expr $current_time - $start_time) + if [ "$elapsed_time" -gt "$max_duration" ]; then + echo "timed out waiting for divergent" + return 1 + fi + done - # determine primary - local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')" - echo "primary and soon to be divergent is $divergent" - ceph pg dump pgs local non_divergent="" for i in $osds do diff --git a/ceph/qa/suites/crimson-rados/basic/centos_8.stream.yaml b/ceph/qa/suites/crimson-rados/basic/centos_8.stream.yaml new file mode 120000 index 000000000..5dceec7e2 --- /dev/null +++ b/ceph/qa/suites/crimson-rados/basic/centos_8.stream.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_8.stream.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/basic/centos_latest.yaml b/ceph/qa/suites/crimson-rados/basic/centos_latest.yaml deleted file mode 120000 index bd9854e70..000000000 --- a/ceph/qa/suites/crimson-rados/basic/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/basic/crimson-supported-all-distro b/ceph/qa/suites/crimson-rados/basic/crimson-supported-all-distro new file mode 120000 index 000000000..a5b729b9e --- /dev/null +++ 
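The stretch-mode test above is the first consumer of the new wait_for_health_gone helper added to ceph-helpers.sh: each warning is provoked, waited for, cleared, and then waited out. Condensed from that test, as a usage reference for other standalone tests:

    # provoke the warning, confirm it appears, undo the change, confirm it clears
    ceph osd crush add-bucket sham zone || return 1
    ceph osd crush move sham root=default || return 1
    wait_for_health "INCORRECT_NUM_BUCKETS_STRETCH_MODE" || return 1
    ceph osd crush rm sham
    wait_for_health_gone "INCORRECT_NUM_BUCKETS_STRETCH_MODE" || return 1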
b/ceph/qa/suites/crimson-rados/basic/crimson-supported-all-distro @@ -0,0 +1 @@ +.qa/distros/crimson-supported-all-distro/ \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/rbd/centos_8.stream.yaml b/ceph/qa/suites/crimson-rados/rbd/centos_8.stream.yaml new file mode 120000 index 000000000..5dceec7e2 --- /dev/null +++ b/ceph/qa/suites/crimson-rados/rbd/centos_8.stream.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_8.stream.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/rbd/centos_latest.yaml b/ceph/qa/suites/crimson-rados/rbd/centos_latest.yaml deleted file mode 120000 index bd9854e70..000000000 --- a/ceph/qa/suites/crimson-rados/rbd/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/rbd/crimson-supported-all-distro b/ceph/qa/suites/crimson-rados/rbd/crimson-supported-all-distro new file mode 120000 index 000000000..a5b729b9e --- /dev/null +++ b/ceph/qa/suites/crimson-rados/rbd/crimson-supported-all-distro @@ -0,0 +1 @@ +.qa/distros/crimson-supported-all-distro/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli_v1/pool/none.yaml b/ceph/qa/suites/crimson-rados/singleton/% similarity index 100% rename from ceph/qa/suites/rbd/cli_v1/pool/none.yaml rename to ceph/qa/suites/crimson-rados/singleton/% diff --git a/ceph/qa/suites/crimson-rados/singleton/.qa b/ceph/qa/suites/crimson-rados/singleton/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/singleton/all/.qa b/ceph/qa/suites/crimson-rados/singleton/all/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/all/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/singleton/all/osd-backfill.yaml b/ceph/qa/suites/crimson-rados/singleton/all/osd-backfill.yaml new file mode 100644 index 000000000..f475d5dc3 --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/all/osd-backfill.yaml @@ -0,0 +1,29 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: + flavor: crimson +- ceph: + pre-mgr-commands: + - sudo ceph config set mgr mgr_pool false --force + log-ignorelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + conf: + osd: + osd min pg log entries: 5 +- osd_backfill: diff --git a/ceph/qa/suites/crimson-rados/singleton/crimson-supported-all-distro b/ceph/qa/suites/crimson-rados/singleton/crimson-supported-all-distro new file mode 120000 index 000000000..a5b729b9e --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/crimson-supported-all-distro @@ -0,0 +1 @@ +.qa/distros/crimson-supported-all-distro/ \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/singleton/crimson_qa_overrides.yaml b/ceph/qa/suites/crimson-rados/singleton/crimson_qa_overrides.yaml new file mode 120000 index 000000000..2bf67af1b --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/crimson_qa_overrides.yaml @@ -0,0 +1 @@ +.qa/config/crimson_qa_overrides.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/singleton/objectstore b/ceph/qa/suites/crimson-rados/singleton/objectstore new file mode 120000 index 
000000000..dbccf5ad9 --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/objectstore @@ -0,0 +1 @@ +../thrash/objectstore \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/singleton/rados.yaml b/ceph/qa/suites/crimson-rados/singleton/rados.yaml new file mode 120000 index 000000000..e95c99ef2 --- /dev/null +++ b/ceph/qa/suites/crimson-rados/singleton/rados.yaml @@ -0,0 +1 @@ +./.qa/suites/rados/singleton/rados.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/thrash/centos_8.stream.yaml b/ceph/qa/suites/crimson-rados/thrash/centos_8.stream.yaml new file mode 120000 index 000000000..5dceec7e2 --- /dev/null +++ b/ceph/qa/suites/crimson-rados/thrash/centos_8.stream.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_8.stream.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/thrash/centos_latest.yaml b/ceph/qa/suites/crimson-rados/thrash/centos_latest.yaml deleted file mode 120000 index bd9854e70..000000000 --- a/ceph/qa/suites/crimson-rados/thrash/centos_latest.yaml +++ /dev/null @@ -1 +0,0 @@ -.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/crimson-rados/thrash/crimson-supported-all-distro b/ceph/qa/suites/crimson-rados/thrash/crimson-supported-all-distro new file mode 120000 index 000000000..a5b729b9e --- /dev/null +++ b/ceph/qa/suites/crimson-rados/thrash/crimson-supported-all-distro @@ -0,0 +1 @@ +.qa/distros/crimson-supported-all-distro/ \ No newline at end of file diff --git a/ceph/qa/suites/fs/functional/tasks/damage.yaml b/ceph/qa/suites/fs/functional/tasks/damage.yaml index ff8b3a58a..7703aee93 100644 --- a/ceph/qa/suites/fs/functional/tasks/damage.yaml +++ b/ceph/qa/suites/fs/functional/tasks/damage.yaml @@ -19,6 +19,7 @@ overrides: - MDS_READ_ONLY - force file system read-only - with standby daemon mds + - MDS abort because newly corrupt dentry tasks: - cephfs_test_runner: modules: diff --git a/ceph/qa/suites/rbd/encryption/pool/none.yaml b/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/+ similarity index 100% rename from ceph/qa/suites/rbd/encryption/pool/none.yaml rename to ceph/qa/suites/fs/mirror-ha/cephfs-mirror/+ diff --git a/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/1-volume-create-rm.yaml b/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/1-volume-create-rm.yaml new file mode 100644 index 000000000..4ee16e1c9 --- /dev/null +++ b/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/1-volume-create-rm.yaml @@ -0,0 +1,14 @@ +meta: +- desc: create/rm volumes and set configs + +tasks: + - exec: + mon.a: + - "ceph fs volume create dc" + - "ceph fs volume create dc-backup" + - full_sequential_finally: + - exec: + mon.a: + - ceph config set mon mon_allow_pool_delete true + - ceph fs volume rm dc --yes-i-really-mean-it + - ceph fs volume rm dc-backup --yes-i-really-mean-it diff --git a/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml b/ceph/qa/suites/fs/mirror-ha/cephfs-mirror/2-three-per-cluster.yaml similarity index 100% rename from ceph/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml rename to ceph/qa/suites/fs/mirror-ha/cephfs-mirror/2-three-per-cluster.yaml diff --git a/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml b/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml index f43a9a4ba..ce4dddf78 100644 --- a/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml +++ b/ceph/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml @@ -8,17 +8,6 @@ overrides: debug client: 10 tasks: - - exec: - 
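The new 1-volume-create-rm.yaml above moves volume creation and teardown out of the mirror-ha workunit fragment (see the removal just below). The teardown sequence is worth calling out: removing a volume deletes its backing pools, which the monitors refuse unless pool deletion is explicitly allowed, hence the config flag set first. The commands are taken verbatim from the fragment above:

    # pool deletion is disabled by default, so allow it just before teardown
    ceph config set mon mon_allow_pool_delete true
    ceph fs volume rm dc --yes-i-really-mean-it
    ceph fs volume rm dc-backup --yes-i-really-mean-it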
mon.a: - - "ceph fs volume create dc" - - "ceph fs volume create dc-backup" - # Remove volumes during unwind to avoid MDS replacement warnings: - - full_sequential_finally: - - exec: - mon.a: - - ceph config set mon mon_allow_pool_delete true - - ceph fs volume rm dc --yes-i-really-mean-it - - ceph fs volume rm dc-backup --yes-i-really-mean-it - ceph-fuse: client.1: cephfs_name: dc diff --git a/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml b/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml index 5329fa67f..8293595e2 100644 --- a/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml +++ b/ceph/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml @@ -2,16 +2,24 @@ tasks: - pexec: clients: + - set -x - cd $TESTDIR - - wget http://download.ceph.com/qa/ior.tbz2 - - tar xvfj ior.tbz2 - - cd ior + # partially or incorrectly installed mpich will create a mess and the + # configure script or the build process (which is initiated using "make" + # command) for the ior project will fail + - sudo apt purge -y mpich + - sudo apt install -y mpich + - wget http://download.ceph.com/qa/ior-3.3.0.tar.bz2 + - tar xvfj ior-3.3.0.tar.bz2 + - cd ior-3.3.0 - ./configure - make - make install DESTDIR=$TESTDIR/binary/ - cd $TESTDIR/ - - rm ior.tbz2 - - rm -r ior + - sudo apt install -y tree + - tree binary/ + - rm ior-3.3.0.tar.bz2 + - rm -r ior-3.3.0 - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt - ssh_keys: - mpi: diff --git a/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml b/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml index c2bbd2fbe..32720e488 100644 --- a/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml +++ b/ceph/qa/suites/fs/multiclient/tasks/mdtest.yaml @@ -2,19 +2,33 @@ tasks: - pexec: clients: + - set -x - cd $TESTDIR - - wget http://download.ceph.com/qa/mdtest-1.9.3.tgz - - mkdir mdtest-1.9.3 - - cd mdtest-1.9.3 - - tar xvfz $TESTDIR/mdtest-1.9.3.tgz - - rm $TESTDIR/mdtest-1.9.3.tgz - - MPI_CC=mpicc make + - sudo apt purge -y mpich + - sudo apt install -y mpich + # use ior project instead of mdtest project because latter has been + # merged into former. See: + # https://github.com/MDTEST-LANL/mdtest/blob/master/README.md + - wget http://download.ceph.com/qa/ior-3.3.0.tar.bz2 + - tar xvfj ior-3.3.0.tar.bz2 + - cd ior-3.3.0 + # this option was set originall when mdtest binary was built using + # mdtest PR and not through ior project. 
+ #- MPI_CC=mpicc make + - ./configure + - make + - make install DESTDIR=$TESTDIR/binary/ + - cd $TESTDIR/ + - sudo apt install -y tree + - tree binary/ + - rm ior-3.3.0.tar.bz2 + - rm -r ior-3.3.0 - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt - ssh_keys: - mpi: - exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R + exec: $TESTDIR/binary/usr/local/bin/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R - pexec: clients: + - rm -f $TESTDIR/gmnt/ior.testfile - rm -f $TESTDIR/gmnt - - rm -rf $TESTDIR/mdtest-1.9.3 - - rm -rf $TESTDIR/._mdtest-1.9.3 + - rm -rf $TESTDIR/binary diff --git a/ceph/qa/suites/rbd/librbd/config/none.yaml b/ceph/qa/suites/fs/nfs/% similarity index 100% rename from ceph/qa/suites/rbd/librbd/config/none.yaml rename to ceph/qa/suites/fs/nfs/% diff --git a/ceph/qa/suites/krbd/singleton/msgr-failures/.qa b/ceph/qa/suites/fs/nfs/.qa similarity index 100% rename from ceph/qa/suites/krbd/singleton/msgr-failures/.qa rename to ceph/qa/suites/fs/nfs/.qa diff --git a/ceph/qa/suites/rbd/librbd/pool/none.yaml b/ceph/qa/suites/fs/nfs/cluster/+ similarity index 100% rename from ceph/qa/suites/rbd/librbd/pool/none.yaml rename to ceph/qa/suites/fs/nfs/cluster/+ diff --git a/ceph/qa/suites/rbd/basic/cachepool/.qa b/ceph/qa/suites/fs/nfs/cluster/.qa similarity index 100% rename from ceph/qa/suites/rbd/basic/cachepool/.qa rename to ceph/qa/suites/fs/nfs/cluster/.qa diff --git a/ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml b/ceph/qa/suites/fs/nfs/cluster/1-node.yaml similarity index 65% rename from ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml rename to ceph/qa/suites/fs/nfs/cluster/1-node.yaml index 8448c1a2f..8eeec7d2d 100644 --- a/ceph/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml +++ b/ceph/qa/suites/fs/nfs/cluster/1-node.yaml @@ -1,10 +1,12 @@ +meta: +- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 2 mds, 1 client roles: - - host.a + - mon.a + - mgr.x - osd.0 - osd.1 - osd.2 - - mon.a - - mgr.a - client.0 tasks: - install: @@ -12,6 +14,3 @@ tasks: - cephadm.shell: host.a: - ceph orch apply mds a -- cephfs_test_runner: - modules: - - tasks.cephfs.test_nfs diff --git a/ceph/qa/suites/rbd/cli/pool/.qa b/ceph/qa/suites/fs/nfs/overrides/.qa similarity index 100% rename from ceph/qa/suites/rbd/cli/pool/.qa rename to ceph/qa/suites/fs/nfs/overrides/.qa diff --git a/ceph/qa/suites/fs/nfs/overrides/ignorelist_health.yaml b/ceph/qa/suites/fs/nfs/overrides/ignorelist_health.yaml new file mode 100644 index 000000000..8bfe4dc6f --- /dev/null +++ b/ceph/qa/suites/fs/nfs/overrides/ignorelist_health.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-ignorelist: + - overall HEALTH_ + - \(FS_DEGRADED\) + - \(MDS_FAILED\) + - \(MDS_DEGRADED\) + - \(FS_WITH_FAILED_MDS\) + - \(MDS_DAMAGE\) + - \(MDS_ALL_DOWN\) + - \(MDS_UP_LESS_THAN_MAX\) + - \(FS_INLINE_DATA_DEPRECATED\) + - \(OSD_DOWN\) diff --git a/ceph/qa/suites/rgw/crypt/supported-random-distro$ b/ceph/qa/suites/fs/nfs/supported-random-distros$ similarity index 100% rename from ceph/qa/suites/rgw/crypt/supported-random-distro$ rename to ceph/qa/suites/fs/nfs/supported-random-distros$ diff --git a/ceph/qa/suites/rbd/cli_v1/pool/.qa b/ceph/qa/suites/fs/nfs/tasks/.qa similarity index 100% rename from ceph/qa/suites/rbd/cli_v1/pool/.qa rename to ceph/qa/suites/fs/nfs/tasks/.qa diff --git a/ceph/qa/suites/fs/nfs/tasks/nfs.yaml b/ceph/qa/suites/fs/nfs/tasks/nfs.yaml new file mode 100644 index 000000000..aa966bff2 --- /dev/null +++ b/ceph/qa/suites/fs/nfs/tasks/nfs.yaml @@ -0,0 +1,4 @@ +tasks: + - cephfs_test_runner: + modules: 
+ - tasks.cephfs.test_nfs diff --git a/ceph/qa/suites/fs/thrash/multifs/overrides/client-shutdown.yaml b/ceph/qa/suites/fs/thrash/multifs/overrides/client-shutdown.yaml new file mode 100644 index 000000000..30b2ea981 --- /dev/null +++ b/ceph/qa/suites/fs/thrash/multifs/overrides/client-shutdown.yaml @@ -0,0 +1,6 @@ +# Lengthen the timeout for thrashed MDS +overrides: + ceph: + conf: + client: + client_shutdown_timeout: 120 diff --git a/ceph/qa/suites/fs/thrash/workloads/overrides/client-shutdown.yaml b/ceph/qa/suites/fs/thrash/workloads/overrides/client-shutdown.yaml new file mode 100644 index 000000000..30b2ea981 --- /dev/null +++ b/ceph/qa/suites/fs/thrash/workloads/overrides/client-shutdown.yaml @@ -0,0 +1,6 @@ +# Lengthen the timeout for thrashed MDS +overrides: + ceph: + conf: + client: + client_shutdown_timeout: 120 diff --git a/ceph/qa/suites/fs/workload/tasks/5-workunit/postgres.yaml b/ceph/qa/suites/fs/workload/tasks/5-workunit/postgres.yaml index 12a84a9ba..7e71dbc88 100644 --- a/ceph/qa/suites/fs/workload/tasks/5-workunit/postgres.yaml +++ b/ceph/qa/suites/fs/workload/tasks/5-workunit/postgres.yaml @@ -30,7 +30,7 @@ tasks: - sudo -u postgres -- postgresql-setup --initdb - sudo ls -lZaR /tmp/cephfs/postgres/ - sudo systemctl start postgresql - - sudo -u postgres -- pgbench -s 500 -i + - sudo -u postgres -- pgbench -s 32 -i - sudo -u postgres -- pgbench -c 100 -j 4 --progress=5 --time=900 - sudo systemctl stop postgresql - sudo ls -lZaR /tmp/cephfs/postgres/ diff --git a/ceph/qa/suites/krbd/basic/conf.yaml b/ceph/qa/suites/krbd/basic/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/basic/conf.yaml +++ b/ceph/qa/suites/krbd/basic/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/krbd/fsx/conf.yaml b/ceph/qa/suites/krbd/fsx/conf.yaml index 30da870b2..eb6d72a80 100644 --- a/ceph/qa/suites/krbd/fsx/conf.yaml +++ b/ceph/qa/suites/krbd/fsx/conf.yaml @@ -2,4 +2,5 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false diff --git a/ceph/qa/suites/krbd/ms_modeless/conf.yaml b/ceph/qa/suites/krbd/ms_modeless/conf.yaml index 30da870b2..eb6d72a80 100644 --- a/ceph/qa/suites/krbd/ms_modeless/conf.yaml +++ b/ceph/qa/suites/krbd/ms_modeless/conf.yaml @@ -2,4 +2,5 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false diff --git a/ceph/qa/suites/krbd/rbd-nomount/conf.yaml b/ceph/qa/suites/krbd/rbd-nomount/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/rbd-nomount/conf.yaml +++ b/ceph/qa/suites/krbd/rbd-nomount/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/krbd/rbd/conf.yaml b/ceph/qa/suites/krbd/rbd/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/rbd/conf.yaml +++ b/ceph/qa/suites/krbd/rbd/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/rbd/migration/5-pool/none.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/% similarity index 100% rename from ceph/qa/suites/rbd/migration/5-pool/none.yaml rename to ceph/qa/suites/krbd/singleton-msgr-failures/% diff --git a/ceph/qa/suites/rbd/encryption/pool/.qa 
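The two new client-shutdown.yaml fragments raise client_shutdown_timeout to 120 seconds, per their comment to lengthen the unmount timeout while MDSes are being thrashed. Outside teuthology the same knob can be set through the config database; shown as a usage note only:

    # give clients more time to flush state on unmount while MDSes are thrashed
    ceph config set client client_shutdown_timeout 120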
b/ceph/qa/suites/krbd/singleton-msgr-failures/.qa similarity index 100% rename from ceph/qa/suites/rbd/encryption/pool/.qa rename to ceph/qa/suites/krbd/singleton-msgr-failures/.qa diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/bluestore-bitmap.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/bluestore-bitmap.yaml new file mode 120000 index 000000000..a59cf5175 --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/conf.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/conf.yaml new file mode 100644 index 000000000..5e7ed992e --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git a/ceph/qa/suites/rbd/librbd/config/.qa b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/.qa similarity index 100% rename from ceph/qa/suites/rbd/librbd/config/.qa rename to ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/.qa diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc-rxbounce.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc-rxbounce.yaml new file mode 100644 index 000000000..4d27d0113 --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc-rxbounce.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc,rxbounce diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc.yaml new file mode 100644 index 000000000..3b072578f --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy-rxbounce.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy-rxbounce.yaml new file mode 100644 index 000000000..244e45cbc --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy-rxbounce.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy,rxbounce diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy.yaml new file mode 100644 index 000000000..0048dcb0c --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git a/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/secure.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/secure.yaml new file mode 100644 index 000000000..a735db18d --- /dev/null +++ b/ceph/qa/suites/krbd/singleton-msgr-failures/ms_mode$/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/ceph/qa/suites/rbd/librbd/pool/.qa b/ceph/qa/suites/krbd/singleton-msgr-failures/msgr-failures/.qa similarity index 100% rename from ceph/qa/suites/rbd/librbd/pool/.qa rename to ceph/qa/suites/krbd/singleton-msgr-failures/msgr-failures/.qa diff --git a/ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/msgr-failures/few.yaml similarity index 100% rename from ceph/qa/suites/krbd/singleton/msgr-failures/few.yaml 
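The new krbd/singleton-msgr-failures facet matrix sets "rbd default map options" to one of the kernel messenger modes (legacy, crc, or secure, each with or without rxbounce). The same options can be passed explicitly when mapping an image, which is handy when reproducing a single facet by hand; a usage sketch, with pool and image as placeholders:

    # map with the msgr2 secure mode and receive-buffer bouncing enabled
    rbd device map -o ms_mode=secure,rxbounce <pool>/<image>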
rename to ceph/qa/suites/krbd/singleton-msgr-failures/msgr-failures/few.yaml diff --git a/ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/msgr-failures/many.yaml similarity index 100% rename from ceph/qa/suites/krbd/singleton/msgr-failures/many.yaml rename to ceph/qa/suites/krbd/singleton-msgr-failures/msgr-failures/many.yaml diff --git a/ceph/qa/suites/rbd/migration/5-pool/.qa b/ceph/qa/suites/krbd/singleton-msgr-failures/tasks/.qa similarity index 100% rename from ceph/qa/suites/rbd/migration/5-pool/.qa rename to ceph/qa/suites/krbd/singleton-msgr-failures/tasks/.qa diff --git a/ceph/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml b/ceph/qa/suites/krbd/singleton-msgr-failures/tasks/rbd_xfstests.yaml similarity index 100% rename from ceph/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml rename to ceph/qa/suites/krbd/singleton-msgr-failures/tasks/rbd_xfstests.yaml diff --git a/ceph/qa/suites/krbd/singleton/conf.yaml b/ceph/qa/suites/krbd/singleton/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/singleton/conf.yaml +++ b/ceph/qa/suites/krbd/singleton/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/krbd/singleton/tasks/krbd_watch_errors.yaml b/ceph/qa/suites/krbd/singleton/tasks/krbd_watch_errors.yaml new file mode 100644 index 000000000..5e30ef2ba --- /dev/null +++ b/ceph/qa/suites/krbd/singleton/tasks/krbd_watch_errors.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + conf: + global: + osd pool default size: 1 + osd: + osd shutdown pgref assert: true +roles: +- [mon.a, mgr.x, osd.0, client.0] + +tasks: +- install: + extra_system_packages: + - fio +- ceph: +- workunit: + clients: + all: + - rbd/krbd_watch_errors.sh diff --git a/ceph/qa/suites/krbd/thrash/conf.yaml b/ceph/qa/suites/krbd/thrash/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/thrash/conf.yaml +++ b/ceph/qa/suites/krbd/thrash/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/krbd/unmap/conf.yaml b/ceph/qa/suites/krbd/unmap/conf.yaml index 8984e8dc8..e52341f29 100644 --- a/ceph/qa/suites/krbd/unmap/conf.yaml +++ b/ceph/qa/suites/krbd/unmap/conf.yaml @@ -1,5 +1,7 @@ overrides: ceph: conf: + global: + mon warn on pool no app: false client: rbd default features: 1 # pre-single-major is v3.13, so layering only diff --git a/ceph/qa/suites/krbd/wac/sysfs/conf.yaml b/ceph/qa/suites/krbd/wac/sysfs/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/wac/sysfs/conf.yaml +++ b/ceph/qa/suites/krbd/wac/sysfs/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/krbd/wac/wac/conf.yaml b/ceph/qa/suites/krbd/wac/wac/conf.yaml index 5e7ed992e..41292fa81 100644 --- a/ceph/qa/suites/krbd/wac/wac/conf.yaml +++ b/ceph/qa/suites/krbd/wac/wac/conf.yaml @@ -2,6 +2,7 @@ overrides: ceph: conf: global: + mon warn on pool no app: false ms die on skipped message: false client: rbd default features: 37 diff --git a/ceph/qa/suites/orch/cephadm/nfs b/ceph/qa/suites/orch/cephadm/nfs new file mode 120000 index 000000000..628e2a2a2 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/nfs @@ -0,0 +1 @@ +.qa/suites/fs/nfs/ \ No newline at end of file diff --git 
a/ceph/qa/suites/orch/cephadm/orchestrator_cli/orchestrator_cli.yaml b/ceph/qa/suites/orch/cephadm/orchestrator_cli/orchestrator_cli.yaml index 564a2eb02..3e6e7f955 100644 --- a/ceph/qa/suites/orch/cephadm/orchestrator_cli/orchestrator_cli.yaml +++ b/ceph/qa/suites/orch/cephadm/orchestrator_cli/orchestrator_cli.yaml @@ -13,6 +13,7 @@ tasks: - \(PG_ - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - - tasks.mgr.test_orchestrator_cli \ No newline at end of file + - tasks.mgr.test_orchestrator_cli diff --git a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml new file mode 100644 index 000000000..477e5c443 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml @@ -0,0 +1,35 @@ +tasks: +- vip: + +# make sure cephadm notices the new IP +- cephadm.shell: + host.a: + - ceph orch device ls --refresh + +# stop kernel nfs server, if running +- vip.exec: + all-hosts: + - systemctl stop nfs-server + +# use nfs module to create cluster and export +- cephadm.shell: + host.a: + - ceph fs volume create fs1 + - ceph nfs cluster create happy --ingress --virtual-ip={{VIP0}} --ingress-mode=haproxy-protocol + - ceph nfs export create cephfs --fsname fs1 --cluster-id happy --pseudo-path /d1 + +# wait for services to start +- cephadm.wait_for_service: + service: nfs.happy +- cephadm.wait_for_service: + service: ingress.nfs.happy + +# make sure mount can be reached over VIP, ensuring both that +# keepalived is maintaining the VIP and that the nfs has bound to it +- vip.exec: + host.a: + - mkdir /mnt/happy + - sleep 1 + - mount -t nfs {{VIP0}}:/d1 /mnt/happy + - echo test > /mnt/happy/testfile + - sync diff --git a/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml new file mode 100644 index 000000000..4c5e26740 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml @@ -0,0 +1,8 @@ +tasks: +- cephadm.shell: + host.a: + - ceph osd pool create foo + - rbd pool init foo + - ceph orch apply nvmeof foo +- cephadm.wait_for_service: + service: nvmeof.foo diff --git a/ceph/qa/suites/orch/cephadm/workunits/task/test_ca_signed_key.yaml b/ceph/qa/suites/orch/cephadm/workunits/task/test_ca_signed_key.yaml new file mode 100644 index 000000000..7bf51f719 --- /dev/null +++ b/ceph/qa/suites/orch/cephadm/workunits/task/test_ca_signed_key.yaml @@ -0,0 +1,31 @@ +roles: +- - host.a + - mon.a + - mgr.a + - osd.0 + - client.0 +- - host.b + - mon.b + - mgr.b + - osd.1 + - client.1 +overrides: + cephadm: + use-ca-signed-key: True +tasks: +- install: +- cephadm: +- cephadm.shell: + host.a: + - | + set -ex + HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname') + for host in $HOSTNAMES; do + # do a check-host on each host to make sure it's reachable + ceph cephadm check-host ${host} 2> ${host}-ok.txt + HOST_OK=$(cat ${host}-ok.txt) + if ! 
grep -q "Host looks OK" <<< "$HOST_OK"; then + printf "Failed host check:\n\n$HOST_OK" + exit 1 + fi + done diff --git a/ceph/qa/suites/perf-basic/objectstore/bluestore.yaml b/ceph/qa/suites/perf-basic/objectstore/bluestore.yaml index f5793d76e..699db42d7 100644 --- a/ceph/qa/suites/perf-basic/objectstore/bluestore.yaml +++ b/ceph/qa/suites/perf-basic/objectstore/bluestore.yaml @@ -1,6 +1,8 @@ overrides: ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd objectstore: bluestore diff --git a/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml index 7b5c89b9b..9ff0a4150 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_stress_watch.yaml @@ -4,6 +4,7 @@ overrides: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(TOO_FEW_PGS\) + - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: clients: diff --git a/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml b/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml index c19cc83a9..cafd824b0 100644 --- a/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml +++ b/ceph/qa/suites/rados/basic/tasks/rados_striper.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/basic/tasks/readwrite.yaml b/ceph/qa/suites/rados/basic/tasks/readwrite.yaml index f135107c7..fc13e67f0 100644 --- a/ceph/qa/suites/rados/basic/tasks/readwrite.yaml +++ b/ceph/qa/suites/rados/basic/tasks/readwrite.yaml @@ -6,6 +6,8 @@ overrides: mon osd initial require min compat client: luminous osd: osd_discard_disconnected_ops: false + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/basic/tasks/repair_test.yaml b/ceph/qa/suites/rados/basic/tasks/repair_test.yaml index 0d3749f4d..383acc956 100644 --- a/ceph/qa/suites/rados/basic/tasks/repair_test.yaml +++ b/ceph/qa/suites/rados/basic/tasks/repair_test.yaml @@ -22,6 +22,7 @@ overrides: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) conf: osd: filestore debug inject read err: true diff --git a/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml b/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml index dde468940..424657343 100644 --- a/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml +++ b/ceph/qa/suites/rados/basic/tasks/scrub_test.yaml @@ -22,6 +22,7 @@ overrides: - \(PG_ - \(OSD_SCRUB_ERRORS\) - \(TOO_FEW_PGS\) + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd deep scrub update digest min age: 0 diff --git a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml index c781061de..e7622f8c3 100644 --- a/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml +++ b/ceph/qa/suites/rados/dashboard/tasks/dashboard.yaml @@ -7,6 +7,8 @@ overrides: conf: osd: osd mclock override recovery settings: true + mgr: + mon warn on pool no app: false tasks: - install: diff --git a/ceph/qa/suites/rados/mgr/tasks/crash.yaml b/ceph/qa/suites/rados/mgr/tasks/crash.yaml index af4c40642..9d2ba535e 100644 --- a/ceph/qa/suites/rados/mgr/tasks/crash.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/crash.yaml @@ -12,6 +12,7 @@ tasks: - \(RECENT_CRASH\) - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - tasks.mgr.test_crash diff --git a/ceph/qa/suites/rados/mgr/tasks/failover.yaml b/ceph/qa/suites/rados/mgr/tasks/failover.yaml index 42c2f5c5b..6d1e0d557 
100644 --- a/ceph/qa/suites/rados/mgr/tasks/failover.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/failover.yaml @@ -11,6 +11,7 @@ tasks: - \(PG_ - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - tasks.mgr.test_failover diff --git a/ceph/qa/suites/rados/mgr/tasks/insights.yaml b/ceph/qa/suites/rados/mgr/tasks/insights.yaml index 5cb124bf7..f7c82cf7f 100644 --- a/ceph/qa/suites/rados/mgr/tasks/insights.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/insights.yaml @@ -14,6 +14,7 @@ tasks: - \(RECENT_CRASH\) - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - tasks.mgr.test_insights diff --git a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml index 9b8209fa9..4403d9fff 100644 --- a/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/module_selftest.yaml @@ -21,6 +21,7 @@ tasks: - Failed to open Telegraf - evicting unresponsive client - 1 mgr modules have recently crashed \(RECENT_MGR_MODULE_CRASH\) + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - tasks.mgr.test_module_selftest diff --git a/ceph/qa/suites/rados/mgr/tasks/per_module_finisher_stats.yaml b/ceph/qa/suites/rados/mgr/tasks/per_module_finisher_stats.yaml index c487b21fc..de1d592df 100644 --- a/ceph/qa/suites/rados/mgr/tasks/per_module_finisher_stats.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/per_module_finisher_stats.yaml @@ -2,6 +2,8 @@ tasks: - install: - ceph: wait-for-scrub: false + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - check-counter: counters: mgr: diff --git a/ceph/qa/suites/rados/mgr/tasks/progress.yaml b/ceph/qa/suites/rados/mgr/tasks/progress.yaml index d04908eab..183a9a29a 100644 --- a/ceph/qa/suites/rados/mgr/tasks/progress.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/progress.yaml @@ -24,6 +24,7 @@ tasks: - \(OSDMAP_FLAGS\) - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - tasks.mgr.test_progress diff --git a/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml b/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml index 388e19678..fd0e23a35 100644 --- a/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/prometheus.yaml @@ -11,6 +11,7 @@ tasks: - \(PG_ - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - cephfs_test_runner: modules: - tasks.mgr.test_prometheus diff --git a/ceph/qa/suites/rados/mgr/tasks/workunits.yaml b/ceph/qa/suites/rados/mgr/tasks/workunits.yaml index 791adc272..a48274033 100644 --- a/ceph/qa/suites/rados/mgr/tasks/workunits.yaml +++ b/ceph/qa/suites/rados/mgr/tasks/workunits.yaml @@ -10,6 +10,7 @@ tasks: - \(PG_ - replacing it with standby - No standby daemons available + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: client.0: diff --git a/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml index c2ec78fd3..f4c98ae27 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml @@ -4,6 +4,7 @@ overrides: - overall HEALTH_ - \(MON_DOWN\) - \(TOO_FEW_PGS\) + - \(POOL_APP_NOT_ENABLED\) tasks: - mon_thrash: revive_delay: 90 diff --git a/ceph/qa/suites/rados/monthrash/thrashers/many.yaml b/ceph/qa/suites/rados/monthrash/thrashers/many.yaml index 958831232..2f5de97e3 100644 --- 
a/ceph/qa/suites/rados/monthrash/thrashers/many.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/many.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) + - \(POOL_APP_NOT_ENABLED\) conf: osd: mon client ping interval: 4 diff --git a/ceph/qa/suites/rados/monthrash/thrashers/one.yaml b/ceph/qa/suites/rados/monthrash/thrashers/one.yaml index e969a0d8d..3a71edaf1 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/one.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/one.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) + - \(POOL_APP_NOT_ENABLED\) tasks: - mon_thrash: revive_delay: 20 diff --git a/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml index e721b9b38..6bb25b7eb 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/sync-many.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) + - \(POOL_APP_NOT_ENABLED\) conf: mon: paxos min: 10 diff --git a/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml b/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml index 8fdd1ad48..30f133055 100644 --- a/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml +++ b/ceph/qa/suites/rados/monthrash/thrashers/sync.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) + - \(POOL_APP_NOT_ENABLED\) conf: mon: paxos min: 10 diff --git a/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml index 522302cd7..6e8dadaf0 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - slow request - overall HEALTH_ - - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml index ad19bd341..bf7eee534 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/rados_5925.yaml @@ -2,7 +2,6 @@ overrides: ceph: log-ignorelist: - overall HEALTH_ - - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml index 3f3c0768a..f14c2c2a6 100644 --- a/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml +++ b/ceph/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml @@ -9,7 +9,6 @@ overrides: - \(SLOW_OPS\) - \(MON_DOWN\) - \(PG_ - - \(POOL_APP_NOT_ENABLED\) - \(SMALLER_PGP_NUM\) - slow request conf: diff --git a/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml index 2a4bf2bac..691bd1efc 100644 --- a/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml +++ b/ceph/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml @@ -7,5 +7,6 @@ tasks: - clocks not synchronized - overall HEALTH_ - \(MON_CLOCK_SKEW\) + - \(POOL_APP_NOT_ENABLED\) - mon_clock_skew_check: expect-skew: false diff --git a/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml index 41749349e..079e1555b 100644 --- a/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml +++ b/ceph/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml @@ -20,5 
+20,6 @@ tasks: - \(SLOW_OPS\) - No standby daemons available - slow request + - \(POOL_APP_NOT_ENABLED\) - mon_clock_skew_check: expect-skew: true diff --git a/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml index 6373663c5..18e53e092 100644 --- a/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml +++ b/ceph/qa/suites/rados/multimon/tasks/mon_recovery.yaml @@ -7,4 +7,5 @@ tasks: - \(PG_AVAILABILITY\) - \(SLOW_OPS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - mon_recovery: diff --git a/ceph/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml b/ceph/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml index e407a3918..ae0f8f381 100644 --- a/ceph/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml +++ b/ceph/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml @@ -8,6 +8,8 @@ tasks: - install: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/perf/ceph.yaml b/ceph/qa/suites/rados/perf/ceph.yaml index 8bc0431b0..ca229dd46 100644 --- a/ceph/qa/suites/rados/perf/ceph.yaml +++ b/ceph/qa/suites/rados/perf/ceph.yaml @@ -15,4 +15,5 @@ tasks: - \(OSD_ - \(OBJECT_ - overall HEALTH + - \(POOL_APP_NOT_ENABLED\) - ssh_keys: diff --git a/ceph/qa/suites/rados/rest/mgr-restful.yaml b/ceph/qa/suites/rados/rest/mgr-restful.yaml index c863463de..4901f401d 100644 --- a/ceph/qa/suites/rados/rest/mgr-restful.yaml +++ b/ceph/qa/suites/rados/rest/mgr-restful.yaml @@ -14,6 +14,7 @@ tasks: - \(OSD_ - \(OBJECT_ - \(OSDMAP_FLAGS\) + - \(POOL_APP_NOT_ENABLED\) - exec: mon.a: - ceph restful create-key admin diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml index a9f831617..341a559f3 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml @@ -15,6 +15,7 @@ overrides: - \(OSD_FULL\) - \(MDS_READ_ONLY\) - \(POOL_FULL\) + - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml index eb30c663a..c42c5539d 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/balancer.yaml @@ -8,6 +8,7 @@ tasks: fs: xfs log-ignorelist: - \(PG_AVAILABILITY\) + - \(POOL_APP_NOT_ENABLED\) - cram: clients: client.0: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml index d7699d042..fddbd0723 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml @@ -12,6 +12,7 @@ tasks: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) conf: global: osd max object name len: 460 diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml index a3085ad3d..6a8faa4a8 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml @@ -17,6 +17,8 @@ overrides: tasks: - install: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml 
b/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml index 7785f2453..b4ce5468a 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml @@ -17,6 +17,7 @@ tasks: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) conf: global: osd max object name len: 460 diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml index a3704069f..a06221449 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml @@ -14,6 +14,7 @@ overrides: - \(POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) - \(CACHE_POOL_NEAR_FULL\) + - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml index 7b7bf592f..5ed655324 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml @@ -16,6 +16,7 @@ tasks: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml index 8b95603d1..15952b989 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml @@ -22,6 +22,7 @@ tasks: - \(PG_ - \(OSD_ - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .3 diff --git a/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml index e79e1aaf5..26d548430 100644 --- a/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml +++ b/ceph/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml @@ -9,6 +9,8 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton/all/admin-socket.yaml b/ceph/qa/suites/rados/singleton/all/admin-socket.yaml index 9ed67c249..0d88e6f2a 100644 --- a/ceph/qa/suites/rados/singleton/all/admin-socket.yaml +++ b/ceph/qa/suites/rados/singleton/all/admin-socket.yaml @@ -11,6 +11,8 @@ openstack: tasks: - install: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - admin_socket: osd.0: version: diff --git a/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml b/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml index fcc3d0e29..d53e9a3b2 100644 --- a/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml +++ b/ceph/qa/suites/rados/singleton/all/backfill-toofull.yaml @@ -30,6 +30,7 @@ tasks: - \(TOO_FEW_PGS\) - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running - slow request + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd min pg log entries: 5 diff --git a/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml b/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml index eb70b70ff..f561795bd 100644 --- a/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml +++ b/ceph/qa/suites/rados/singleton/all/dump-stuck.yaml @@ -18,4 +18,5 @@ tasks: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) - dump_stuck: diff --git 
a/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml b/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml index d71eab149..31724f9e8 100644 --- a/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml +++ b/ceph/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml @@ -29,6 +29,7 @@ tasks: - repair - slow request - unfound + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd min pg log entries: 5 diff --git a/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml index 050365ff4..d397b005c 100644 --- a/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml +++ b/ceph/qa/suites/rados/singleton/all/ec-lost-unfound.yaml @@ -26,4 +26,5 @@ tasks: - \(OBJECT_ - \(SLOW_OPS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - ec_lost_unfound: diff --git a/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml index e4b7b11e2..a6b68cd50 100644 --- a/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml +++ b/ceph/qa/suites/rados/singleton/all/lost-unfound-delete.yaml @@ -24,4 +24,5 @@ tasks: - \(OBJECT_ - \(SLOW_OPS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - rep_lost_unfound_delete: diff --git a/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml b/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml index 280dac87b..4010a5208 100644 --- a/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml +++ b/ceph/qa/suites/rados/singleton/all/lost-unfound.yaml @@ -24,4 +24,5 @@ tasks: - \(OBJECT_ - \(SLOW_OPS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - lost_unfound: diff --git a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml index 7ab945898..e7eded31e 100644 --- a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml +++ b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml @@ -21,6 +21,7 @@ overrides: log-ignorelist: - \(TOO_FEW_PGS\) - \(PENDING_CREATING_PGS\) + - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: diff --git a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml index a51e8921f..dc7c3f0f5 100644 --- a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml +++ b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml @@ -24,6 +24,7 @@ overrides: - \(TOO_FEW_PGS\) - \(PG_ - \(PENDING_CREATING_PGS\) + - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: diff --git a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml index e3658ef77..ee0dae1d4 100644 --- a/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml +++ b/ceph/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml @@ -24,6 +24,7 @@ overrides: - \(TOO_FEW_PGS\) - \(PG_ - \(PENDING_CREATING_PGS\) + - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: diff --git a/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml index a56d164a5..264dc535a 100644 --- a/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml +++ b/ceph/qa/suites/rados/singleton/all/mon-auth-caps.yaml @@ -13,6 +13,7 @@ tasks: log-ignorelist: - overall HEALTH_ - \(AUTH_BAD_CAPS\) + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml 
b/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml index f254754e2..c475a2080 100644 --- a/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml +++ b/ceph/qa/suites/rados/singleton/all/mon-config-key-caps.yaml @@ -13,6 +13,7 @@ tasks: log-ignorelist: - overall HEALTH_ - \(AUTH_BAD_CAPS\) + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton/all/mon-config.yaml b/ceph/qa/suites/rados/singleton/all/mon-config.yaml index 15d48f238..ab1eb81b0 100644 --- a/ceph/qa/suites/rados/singleton/all/mon-config.yaml +++ b/ceph/qa/suites/rados/singleton/all/mon-config.yaml @@ -16,6 +16,8 @@ tasks: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml b/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml index 1a24f4a29..92f5959b5 100644 --- a/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml +++ b/ceph/qa/suites/rados/singleton/all/osd-backfill.yaml @@ -22,6 +22,7 @@ tasks: - \(OSD_ - \(PG_ - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd min pg log entries: 5 diff --git a/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml index 7ade95c9e..6d0955c73 100644 --- a/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml +++ b/ceph/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml @@ -23,6 +23,7 @@ tasks: - \(OSD_ - \(PG_ - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd min pg log entries: 5 diff --git a/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml b/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml index 94ab85a77..9e33b3c39 100644 --- a/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml +++ b/ceph/qa/suites/rados/singleton/all/osd-recovery.yaml @@ -24,6 +24,7 @@ tasks: - \(OBJECT_DEGRADED\) - \(SLOW_OPS\) - slow request + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd min pg log entries: 5 diff --git a/ceph/qa/suites/rados/singleton/all/peer.yaml b/ceph/qa/suites/rados/singleton/all/peer.yaml index 99183c460..f01473b0f 100644 --- a/ceph/qa/suites/rados/singleton/all/peer.yaml +++ b/ceph/qa/suites/rados/singleton/all/peer.yaml @@ -24,4 +24,5 @@ tasks: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) - peer: diff --git a/ceph/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml b/ceph/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml index 90fb128f0..e4b48189f 100644 --- a/ceph/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml +++ b/ceph/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml @@ -34,6 +34,7 @@ tasks: - \(REQUEST_SLOW\) - \(TOO_FEW_PGS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - exec: client.0: - ceph progress off diff --git a/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml index 3d64d73ac..a03c2d521 100644 --- a/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml +++ b/ceph/qa/suites/rados/singleton/all/pg-autoscaler.yaml @@ -30,6 +30,7 @@ tasks: - \(REQUEST_SLOW\) - \(TOO_FEW_PGS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: diff --git a/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml index 2c00192f1..0dd0fb38d 100644 --- a/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml +++ 
b/ceph/qa/suites/rados/singleton/all/pg-removal-interruption.yaml @@ -21,6 +21,7 @@ tasks: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) - exec: client.0: - sudo ceph osd pool create foo 128 128 diff --git a/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml index af556bf41..0c7de00c8 100644 --- a/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml +++ b/ceph/qa/suites/rados/singleton/all/rebuild-mondb.yaml @@ -24,6 +24,7 @@ tasks: - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) conf: mon: debug auth: 30 diff --git a/ceph/qa/suites/rados/singleton/all/test-crash.yaml b/ceph/qa/suites/rados/singleton/all/test-crash.yaml index deab84a5a..ec227cec7 100644 --- a/ceph/qa/suites/rados/singleton/all/test-crash.yaml +++ b/ceph/qa/suites/rados/singleton/all/test-crash.yaml @@ -10,6 +10,7 @@ tasks: - Reduced data availability - OSD_.*DOWN - \(RECENT_CRASH\) + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: client.0: diff --git a/ceph/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml b/ceph/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml index f4da45ad1..039300f7f 100644 --- a/ceph/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml +++ b/ceph/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml @@ -30,6 +30,7 @@ overrides: - \(REQUEST_SLOW\) - \(TOO_FEW_PGS\) - slow request + - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: diff --git a/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml index bd3afe047..a9049560d 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash-backfill-full.yaml @@ -38,6 +38,7 @@ tasks: - \(TOO_FEW_PGS\) - \(POOL_BACKFILLFULL\) - slow request + - \(POOL_APP_NOT_ENABLED\) - thrashosds: op_delay: 30 clean_interval: 120 diff --git a/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml b/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml index 515a2832b..52e0cc51e 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash-eio.yaml @@ -35,6 +35,7 @@ tasks: - \(OBJECT_ - \(TOO_FEW_PGS\) - slow request + - \(POOL_APP_NOT_ENABLED\) - thrashosds: op_delay: 30 clean_interval: 120 diff --git a/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml index 1caef6db5..b3b54e173 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml @@ -17,6 +17,7 @@ tasks: - ceph: log-ignorelist: - but it is still running + - \(POOL_APP_NOT_ENABLED\) - thrashosds: op_delay: 30 clean_interval: 120 diff --git a/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml index d61ad0c42..e58fb4ef4 100644 --- a/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml +++ b/ceph/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml @@ -22,6 +22,7 @@ tasks: - slow request - overall HEALTH_ - \(CACHE_POOL_ + - \(POOL_APP_NOT_ENABLED\) - exec: client.0: - sudo ceph osd pool create base 4 diff --git a/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml index 1ff69c300..04d3969b2 100644 --- 
a/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml +++ b/ceph/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml @@ -30,5 +30,6 @@ tasks: - \(OSD_ - \(PG_ - \(OBJECT_DEGRADED\) + - \(POOL_APP_NOT_ENABLED\) - watch_notify_same_primary: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml index f032d88e7..df0a14500 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml @@ -4,6 +4,7 @@ overrides: - but it is still running - objects unfound and apparently lost - slow request + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .3 diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml index d2c7b85b5..09b6c1782 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml @@ -4,6 +4,7 @@ overrides: - but it is still running - objects unfound and apparently lost - slow request + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .1 diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml index 371ed570b..a36155609 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: mon: osd pool default ec fast read: true diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml index 18843d872..6cf4dc930 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml @@ -4,6 +4,7 @@ overrides: - but it is still running - objects unfound and apparently lost - osd_map_cache_size + - \(POOL_APP_NOT_ENABLED\) conf: mon: mon min osdmap epochs: 2 diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml index 23c9f7d84..794e994f2 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml @@ -8,6 +8,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) tasks: - thrashosds: timeout: 1200 diff --git a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml index 9cbb80dba..15be6b43b 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd scrub min interval: 60 diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml index 94b5d07f8..e18379b5f 
100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml @@ -4,6 +4,7 @@ overrides: - but it is still running - objects unfound and apparently lost - slow request + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .3 diff --git a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml index a869369fa..00c8689d4 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml @@ -4,6 +4,7 @@ overrides: - but it is still running - objects unfound and apparently lost - slow request + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .1 diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml index 9a708db31..0602f01ad 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .3 diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml index 3728bd8e7..989b83e8f 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .1 diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml index 4ba4eb2a2..5fbb9504b 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: mon: osd pool default ec fast read: true diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml index 2e375c7aa..771d9a104 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) create_rbd_pool: False pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml index a3e66e693..f8c542323 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml @@ -8,6 +8,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) tasks: - thrashosds: timeout: 1200 diff --git a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml 
b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml index 98f87d6df..ed13bfd3e 100644 --- a/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd scrub min interval: 60 diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml index cc232ab88..8820a6cd2 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .3 diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml index c04f9535c..54dc88802 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/default.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd debug reject backfill probability: .3 diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml index 27881d218..c1ab4493e 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml @@ -4,6 +4,7 @@ overrides: - but it is still running - objects unfound and apparently lost - osd_map_cache_size + - \(POOL_APP_NOT_ENABLED\) conf: mon: mon min osdmap epochs: 50 diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml index f18a88711..bb65d6a60 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml @@ -12,6 +12,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) tasks: - thrashosds: timeout: 1200 diff --git a/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml b/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml index 54498d0cf..000550bd8 100644 --- a/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml +++ b/ceph/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml @@ -3,6 +3,7 @@ overrides: log-ignorelist: - but it is still running - objects unfound and apparently lost + - \(POOL_APP_NOT_ENABLED\) conf: osd: osd scrub min interval: 60 diff --git a/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml index 8c9764ade..6885f72aa 100644 --- a/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml @@ -3,6 +3,8 @@ overrides: conf: client.0: admin socket: /var/run/ceph/ceph-$name.asok + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - radosbench: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml index 
3f377858a..9ca2576d4 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-agent-big.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml index 29219a7e8..108009e3b 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-agent-small.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml index 808968d6b..f864e1170 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml index 4aec8611c..6bf97c692 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml index 1e55f573f..574a1f753 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml index 11401d7e2..6d11f4cf1 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache-snaps.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/cache.yaml b/ceph/qa/suites/rados/thrash/workloads/cache.yaml index c557a6751..bd9daac7a 100644 --- a/ceph/qa/suites/rados/thrash/workloads/cache.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/cache.yaml @@ -2,6 +2,7 @@ overrides: ceph: log-ignorelist: - must scrub before tier agent can activate + - \(POOL_APP_NOT_ENABLED\) conf: osd: # override short_pg_log_entries.yaml (which sets these under [global]) diff --git a/ceph/qa/suites/rados/thrash/workloads/dedup-io-mixed.yaml b/ceph/qa/suites/rados/thrash/workloads/dedup-io-mixed.yaml index 24a41c31f..7758525a6 100644 
--- a/ceph/qa/suites/rados/thrash/workloads/dedup-io-mixed.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/dedup-io-mixed.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/thrash/workloads/dedup-io-snaps.yaml b/ceph/qa/suites/rados/thrash/workloads/dedup-io-snaps.yaml index 408d1b5d9..3d2ce3026 100644 --- a/ceph/qa/suites/rados/thrash/workloads/dedup-io-snaps.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/dedup-io-snaps.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml b/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml index 1f0759d96..f60afb809 100644 --- a/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml @@ -2,6 +2,9 @@ override: conf: osd: osd deep scrub update digest min age: 0 + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml b/ceph/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml index 902c4b56a..afdb3794d 100644 --- a/ceph/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml @@ -5,6 +5,8 @@ overrides: debug ms: 1 debug objecter: 20 debug rados: 20 + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - full_sequential: - radosbench: diff --git a/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml b/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml index 071f55e39..32efe0ba9 100644 --- a/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/radosbench.yaml @@ -5,6 +5,8 @@ overrides: debug ms: 1 debug objecter: 20 debug rados: 20 + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - full_sequential: - radosbench: diff --git a/ceph/qa/suites/rados/thrash/workloads/redirect.yaml b/ceph/qa/suites/rados/thrash/workloads/redirect.yaml index bebce845c..14cce6643 100644 --- a/ceph/qa/suites/rados/thrash/workloads/redirect.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/redirect.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml b/ceph/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml index c2787c43b..23226771d 100644 --- a/ceph/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/thrash/workloads/redirect_set_object.yaml b/ceph/qa/suites/rados/thrash/workloads/redirect_set_object.yaml index 06ba60c7c..7fe81435c 100644 --- a/ceph/qa/suites/rados/thrash/workloads/redirect_set_object.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/redirect_set_object.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/thrash/workloads/set-chunks-read.yaml b/ceph/qa/suites/rados/thrash/workloads/set-chunks-read.yaml index 1abbdd757..fa6d6a8b8 100644 --- a/ceph/qa/suites/rados/thrash/workloads/set-chunks-read.yaml +++ 
b/ceph/qa/suites/rados/thrash/workloads/set-chunks-read.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: diff --git a/ceph/qa/suites/rados/thrash/workloads/small-objects-balanced.yaml b/ceph/qa/suites/rados/thrash/workloads/small-objects-balanced.yaml index 56aaaf543..ece22cd36 100644 --- a/ceph/qa/suites/rados/thrash/workloads/small-objects-balanced.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/small-objects-balanced.yaml @@ -1,6 +1,8 @@ overrides: ceph: crush_tunables: jewel + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/small-objects-localized.yaml b/ceph/qa/suites/rados/thrash/workloads/small-objects-localized.yaml index e06c717f4..ad791ee11 100644 --- a/ceph/qa/suites/rados/thrash/workloads/small-objects-localized.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/small-objects-localized.yaml @@ -1,6 +1,8 @@ overrides: ceph: crush_tunables: jewel + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml b/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml index ee20bc3c5..6f9edfae8 100644 --- a/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/small-objects.yaml @@ -1,6 +1,8 @@ overrides: ceph: crush_tunables: jewel + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-balanced.yaml b/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-balanced.yaml index 5752633c8..ffb6cbc8b 100644 --- a/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-balanced.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-balanced.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-localized.yaml b/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-localized.yaml index d02a5784e..eca004716 100644 --- a/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-localized.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects-localized.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml index aa82d973a..955327b29 100644 --- a/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml b/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml index 606dcae69..182fc1431 100644 --- a/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml +++ b/ceph/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml @@ -1,3 +1,7 @@ +overrides: + ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - rados: clients: [client.0] diff --git a/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml b/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml index 9263f2a83..1cdd8a688 100644 --- a/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml +++ 
b/ceph/qa/suites/rados/valgrind-leaks/1-start.yaml @@ -11,6 +11,7 @@ overrides: log-ignorelist: - overall HEALTH_ - \(PG_ + - \(POOL_APP_NOT_ENABLED\) conf: global: osd heartbeat grace: 40 diff --git a/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml index 163bc2c08..0236326f3 100644 --- a/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml +++ b/ceph/qa/suites/rados/verify/tasks/rados_cls_all.yaml @@ -4,6 +4,8 @@ overrides: osd: osd_class_load_list: "*" osd_class_default_list: "*" + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: clients: diff --git a/ceph/qa/suites/rbd/basic/cachepool/small.yaml b/ceph/qa/suites/rbd/basic/cachepool/small.yaml deleted file mode 100644 index bad95eadd..000000000 --- a/ceph/qa/suites/rbd/basic/cachepool/small.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 diff --git a/ceph/qa/suites/rbd/basic/conf b/ceph/qa/suites/rbd/basic/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/basic/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml index 3dbb69df4..5bb734d24 100644 --- a/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml +++ b/ceph/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/cli/conf b/ceph/qa/suites/rbd/cli/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/cli/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/data-pool b/ceph/qa/suites/rbd/cli/data-pool new file mode 120000 index 000000000..3df827572 --- /dev/null +++ b/ceph/qa/suites/rbd/cli/data-pool @@ -0,0 +1 @@ +.qa/rbd/data-pool/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml deleted file mode 100644 index db289c7e7..000000000 --- a/ceph/qa/suites/rbd/cli/pool/ec-data-pool.yaml +++ /dev/null @@ -1,27 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 - - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile - - sudo ceph osd pool set datapool allow_ec_overwrites true - - rbd pool init datapool - -overrides: - thrashosds: - bdev_inject_crash: 2 - bdev_inject_crash_probability: .5 - ceph: - fs: xfs - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NO_HIT_SET\) - conf: - client: - rbd default data pool: datapool - osd: # force bluestore since it's required for ec overwrites - osd objectstore: bluestore - bluestore block size: 96636764160 - enable experimental unrecoverable data corrupting features: "*" - osd debug randomize hobject sort order: false -# this 
doesn't work with failures bc the log writes are not atomic across the two backends -# bluestore bluefs env mirror: true diff --git a/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml deleted file mode 100644 index bad95eadd..000000000 --- a/ceph/qa/suites/rbd/cli/pool/small-cache-pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 diff --git a/ceph/qa/suites/rbd/cli/workloads/rbd_support_module_recovery.yaml b/ceph/qa/suites/rbd/cli/workloads/rbd_support_module_recovery.yaml new file mode 100644 index 000000000..aa4d0001f --- /dev/null +++ b/ceph/qa/suites/rbd/cli/workloads/rbd_support_module_recovery.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + mgr: + debug rbd: 20 +tasks: +- install: + extra_system_packages: + - fio +- workunit: + clients: + client.0: + - rbd/rbd_support_module_recovery.sh diff --git a/ceph/qa/suites/rbd/cli_v1/conf b/ceph/qa/suites/rbd/cli_v1/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/cli_v1/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml deleted file mode 100644 index bad95eadd..000000000 --- a/ceph/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 diff --git a/ceph/qa/suites/rbd/encryption/conf b/ceph/qa/suites/rbd/encryption/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/encryption/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/encryption/data-pool b/ceph/qa/suites/rbd/encryption/data-pool new file mode 120000 index 000000000..3df827572 --- /dev/null +++ b/ceph/qa/suites/rbd/encryption/data-pool @@ -0,0 +1 @@ +.qa/rbd/data-pool/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml b/ceph/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml deleted file mode 100644 index a0f88b409..000000000 --- a/ceph/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml +++ /dev/null @@ -1,21 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 - - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it - - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile - - sudo 
ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 - - rbd pool init rbd diff --git a/ceph/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml b/ceph/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml deleted file mode 100644 index c5647dba1..000000000 --- a/ceph/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd pool create datapool 4 - - rbd pool init datapool - -overrides: - ceph: - conf: - client: - rbd default data pool: datapool diff --git a/ceph/qa/suites/rbd/encryption/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/encryption/pool/small-cache-pool.yaml deleted file mode 100644 index bad95eadd..000000000 --- a/ceph/qa/suites/rbd/encryption/pool/small-cache-pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 diff --git a/ceph/qa/suites/rbd/immutable-object-cache/conf b/ceph/qa/suites/rbd/immutable-object-cache/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/immutable-object-cache/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/iscsi/conf b/ceph/qa/suites/rbd/iscsi/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/iscsi/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/conf b/ceph/qa/suites/rbd/librbd/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/librbd/data-pool b/ceph/qa/suites/rbd/librbd/data-pool new file mode 120000 index 000000000..3df827572 --- /dev/null +++ b/ceph/qa/suites/rbd/librbd/data-pool @@ -0,0 +1 @@ +.qa/rbd/data-pool/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/pool/.qa b/ceph/qa/suites/rbd/librbd/extra-conf/.qa similarity index 100% rename from ceph/qa/suites/rbd/qemu/pool/.qa rename to ceph/qa/suites/rbd/librbd/extra-conf/.qa diff --git a/ceph/qa/suites/rbd/librbd/config/copy-on-read.yaml b/ceph/qa/suites/rbd/librbd/extra-conf/copy-on-read.yaml similarity index 100% rename from ceph/qa/suites/rbd/librbd/config/copy-on-read.yaml rename to ceph/qa/suites/rbd/librbd/extra-conf/copy-on-read.yaml diff --git a/ceph/qa/suites/rbd/qemu/pool/none.yaml b/ceph/qa/suites/rbd/librbd/extra-conf/none.yaml similarity index 100% rename from ceph/qa/suites/rbd/qemu/pool/none.yaml rename to ceph/qa/suites/rbd/librbd/extra-conf/none.yaml diff --git a/ceph/qa/suites/rbd/librbd/config/permit-partial-discard.yaml b/ceph/qa/suites/rbd/librbd/extra-conf/permit-partial-discard.yaml similarity index 100% rename from 
ceph/qa/suites/rbd/librbd/config/permit-partial-discard.yaml rename to ceph/qa/suites/rbd/librbd/extra-conf/permit-partial-discard.yaml diff --git a/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml b/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml deleted file mode 100644 index f39a5bb4c..000000000 --- a/ceph/qa/suites/rbd/librbd/pool/ec-data-pool.yaml +++ /dev/null @@ -1,24 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 - - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile - - sudo ceph osd pool set datapool allow_ec_overwrites true - - rbd pool init datapool - -overrides: - thrashosds: - bdev_inject_crash: 2 - bdev_inject_crash_probability: .5 - ceph: - fs: xfs - conf: - client: - rbd default data pool: datapool - osd: # force bluestore since it's required for ec overwrites - osd objectstore: bluestore - bluestore block size: 96636764160 - enable experimental unrecoverable data corrupting features: "*" - osd debug randomize hobject sort order: false -# this doesn't work with failures bc the log writes are not atomic across the two backends -# bluestore bluefs env mirror: true diff --git a/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml b/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml deleted file mode 100644 index c5647dba1..000000000 --- a/ceph/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd pool create datapool 4 - - rbd pool init datapool - -overrides: - ceph: - conf: - client: - rbd default data pool: datapool diff --git a/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml deleted file mode 100644 index bad95eadd..000000000 --- a/ceph/qa/suites/rbd/librbd/pool/small-cache-pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 diff --git a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml index ac9dd3cfe..eb63fd771 100644 --- a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml +++ b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml index c410fbcb0..ec4194598 100644 --- a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml +++ b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml index 3a121de3e..6c3686806 100644 --- 
a/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml +++ b/ceph/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/maintenance/conf b/ceph/qa/suites/rbd/maintenance/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/maintenance/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/migration/5-data-pool b/ceph/qa/suites/rbd/migration/5-data-pool new file mode 120000 index 000000000..3df827572 --- /dev/null +++ b/ceph/qa/suites/rbd/migration/5-data-pool @@ -0,0 +1 @@ +.qa/rbd/data-pool/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/migration/5-pool/ec-data-pool.yaml b/ceph/qa/suites/rbd/migration/5-pool/ec-data-pool.yaml deleted file mode 100644 index f39a5bb4c..000000000 --- a/ceph/qa/suites/rbd/migration/5-pool/ec-data-pool.yaml +++ /dev/null @@ -1,24 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 - - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile - - sudo ceph osd pool set datapool allow_ec_overwrites true - - rbd pool init datapool - -overrides: - thrashosds: - bdev_inject_crash: 2 - bdev_inject_crash_probability: .5 - ceph: - fs: xfs - conf: - client: - rbd default data pool: datapool - osd: # force bluestore since it's required for ec overwrites - osd objectstore: bluestore - bluestore block size: 96636764160 - enable experimental unrecoverable data corrupting features: "*" - osd debug randomize hobject sort order: false -# this doesn't work with failures bc the log writes are not atomic across the two backends -# bluestore bluefs env mirror: true diff --git a/ceph/qa/suites/rbd/migration/5-pool/replicated-data-pool.yaml b/ceph/qa/suites/rbd/migration/5-pool/replicated-data-pool.yaml deleted file mode 100644 index c5647dba1..000000000 --- a/ceph/qa/suites/rbd/migration/5-pool/replicated-data-pool.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd pool create datapool 4 - - rbd pool init datapool - -overrides: - ceph: - conf: - client: - rbd default data pool: datapool diff --git a/ceph/qa/suites/rbd/migration/conf b/ceph/qa/suites/rbd/migration/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/migration/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror-thrash/conf b/ceph/qa/suites/rbd/mirror-thrash/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/mirror-thrash/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/mirror/conf b/ceph/qa/suites/rbd/mirror/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/mirror/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/nbd/conf b/ceph/qa/suites/rbd/nbd/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/nbd/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/home/conf b/ceph/qa/suites/rbd/pwl-cache/home/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/home/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline 
at end of file diff --git a/ceph/qa/suites/rbd/pwl-cache/tmpfs/conf b/ceph/qa/suites/rbd/pwl-cache/tmpfs/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/pwl-cache/tmpfs/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/conf b/ceph/qa/suites/rbd/qemu/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/data-pool b/ceph/qa/suites/rbd/qemu/data-pool new file mode 120000 index 000000000..3df827572 --- /dev/null +++ b/ceph/qa/suites/rbd/qemu/data-pool @@ -0,0 +1 @@ +.qa/rbd/data-pool/ \ No newline at end of file diff --git a/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml b/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml deleted file mode 100644 index a0f88b409..000000000 --- a/ceph/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml +++ /dev/null @@ -1,21 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 - - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it - - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 - - rbd pool init rbd diff --git a/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml b/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml deleted file mode 100644 index f39a5bb4c..000000000 --- a/ceph/qa/suites/rbd/qemu/pool/ec-data-pool.yaml +++ /dev/null @@ -1,24 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 - - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile - - sudo ceph osd pool set datapool allow_ec_overwrites true - - rbd pool init datapool - -overrides: - thrashosds: - bdev_inject_crash: 2 - bdev_inject_crash_probability: .5 - ceph: - fs: xfs - conf: - client: - rbd default data pool: datapool - osd: # force bluestore since it's required for ec overwrites - osd objectstore: bluestore - bluestore block size: 96636764160 - enable experimental unrecoverable data corrupting features: "*" - osd debug randomize hobject sort order: false -# this doesn't work with failures bc the log writes are not atomic across the two backends -# bluestore bluefs env mirror: true diff --git a/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml b/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml deleted file mode 100644 index c5647dba1..000000000 --- a/ceph/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- exec: - client.0: - - sudo ceph osd pool create datapool 4 - - rbd pool init datapool - -overrides: - ceph: - conf: - client: - rbd default data pool: datapool diff --git a/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml b/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml deleted file mode 100644 index bad95eadd..000000000 --- a/ceph/qa/suites/rbd/qemu/pool/small-cache-pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-ignorelist: 
- - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 diff --git a/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml b/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml index b41f92d52..48156c7a0 100644 --- a/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml +++ b/ceph/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml @@ -6,8 +6,6 @@ roles: tasks: - install: - ceph: - log-ignorelist: - - 'application not enabled' - workunit: timeout: 30m clients: diff --git a/ceph/qa/suites/rbd/singleton-bluestore/conf b/ceph/qa/suites/rbd/singleton-bluestore/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/singleton-bluestore/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml b/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml index 954760159..4120d21c9 100644 --- a/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml +++ b/ceph/qa/suites/rbd/singleton/all/rbd_mirror.yaml @@ -7,7 +7,6 @@ tasks: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: [rbd/test_rbd_mirror.sh] diff --git a/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml b/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml index b06ede605..782b02141 100644 --- a/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml +++ b/ceph/qa/suites/rbd/singleton/all/rbd_tasks.yaml @@ -4,10 +4,6 @@ tasks: - install: - ceph: fs: xfs - log-ignorelist: - - overall HEALTH_ - - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: [rbd/test_rbd_tasks.sh] diff --git a/ceph/qa/suites/rbd/singleton/conf b/ceph/qa/suites/rbd/singleton/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/singleton/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/conf b/ceph/qa/suites/rbd/thrash/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/thrash/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml b/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml deleted file mode 100644 index b434e28be..000000000 --- a/ceph/qa/suites/rbd/thrash/thrashers/cache.yaml +++ /dev/null @@ -1,21 +0,0 @@ -overrides: - ceph: - log-ignorelist: - - but it is still running - - objects unfound and apparently lost - - overall HEALTH_ - - \(CACHE_POOL_NEAR_FULL\) - - \(CACHE_POOL_NO_HIT_SET\) -tasks: -- exec: - client.0: - - sudo ceph osd pool create cache 4 - - sudo ceph osd tier add rbd cache - - sudo ceph osd tier cache-mode cache writeback - - sudo ceph osd tier set-overlay rbd cache - - sudo ceph osd pool set cache hit_set_type bloom - - sudo ceph osd pool set cache hit_set_count 8 - - sudo ceph osd pool set cache hit_set_period 60 - - sudo ceph osd pool set cache target_max_objects 250 -- thrashosds: - timeout: 1200 diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml index c410fbcb0..ec4194598 
100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml index ce47fc46f..26e20522c 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml @@ -10,7 +10,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) conf: diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml index 3a121de3e..6c3686806 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml index ac9dd3cfe..eb63fd771 100644 --- a/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml +++ b/ceph/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/valgrind/conf b/ceph/qa/suites/rbd/valgrind/conf new file mode 120000 index 000000000..4bc0fe86c --- /dev/null +++ b/ceph/qa/suites/rbd/valgrind/conf @@ -0,0 +1 @@ +.qa/rbd/conf \ No newline at end of file diff --git a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml index ac9dd3cfe..eb63fd771 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml index c410fbcb0..ec4194598 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml index 3a121de3e..6c3686806 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) - is full \(reached quota - \(POOL_FULL\) tasks: diff --git a/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml 
b/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml index 251de1c1f..ae5a83c30 100644 --- a/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml +++ b/ceph/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml @@ -3,7 +3,6 @@ overrides: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: clients: diff --git a/ceph/qa/suites/rgw/cloud-transition/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/cloud-transition/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/cloud-transition/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/crypt/2-kms/barbican.yaml b/ceph/qa/suites/rgw/crypt/2-kms/barbican.yaml index bc059c0a3..0c75a131c 100644 --- a/ceph/qa/suites/rgw/crypt/2-kms/barbican.yaml +++ b/ceph/qa/suites/rgw/crypt/2-kms/barbican.yaml @@ -27,7 +27,11 @@ tasks: - tox: [ client.0 ] - keystone: client.0: - force-branch: stable/xena + force-branch: stable/2023.1 + services: + - name: swift + type: object-store + description: Swift Service projects: - name: rgwcrypt description: Encryption Tenant @@ -62,10 +66,6 @@ tasks: - name: creator user: s3-user project: s3 - services: - - name: swift - type: object-store - description: Swift Service - barbican: client.0: force-branch: stable/xena diff --git a/ceph/qa/suites/rgw/crypt/ubuntu_latest.yaml b/ceph/qa/suites/rgw/crypt/ubuntu_latest.yaml new file mode 120000 index 000000000..fe01dafd4 --- /dev/null +++ b/ceph/qa/suites/rgw/crypt/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_latest.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/dbstore/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/dbstore/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/dbstore/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/hadoop-s3a/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/hadoop-s3a/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/hadoop-s3a/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/lifecycle/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/lifecycle/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/lifecycle/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/notifications/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/notifications/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/notifications/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/service-token/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/service-token/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/service-token/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/tempest/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/tempest/ignore-pg-availability.yaml new file mode 120000 index 
000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/tempest/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/thrash/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/thrash/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/thrash/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/tools/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/tools/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/tools/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/upgrade/1-install/pacific/overrides.yaml b/ceph/qa/suites/rgw/upgrade/1-install/pacific/overrides.yaml index ab06356f6..d882523e0 100644 --- a/ceph/qa/suites/rgw/upgrade/1-install/pacific/overrides.yaml +++ b/ceph/qa/suites/rgw/upgrade/1-install/pacific/overrides.yaml @@ -1,3 +1,3 @@ overrides: ragweed: - default-branch: ceph-master # ceph-pacific doesn't have tox, but tests are the same + default-branch: ceph-reef # ceph-pacific doesn't have tox, but tests are the same diff --git a/ceph/qa/suites/rgw/upgrade/1-install/quincy/overrides.yaml b/ceph/qa/suites/rgw/upgrade/1-install/quincy/overrides.yaml index ef9110a95..02508d329 100644 --- a/ceph/qa/suites/rgw/upgrade/1-install/quincy/overrides.yaml +++ b/ceph/qa/suites/rgw/upgrade/1-install/quincy/overrides.yaml @@ -1,3 +1,3 @@ overrides: ragweed: - default-branch: ceph-master # ceph-quincy doesn't have tox, but tests are the same + default-branch: ceph-reef # ceph-quincy doesn't have tox, but tests are the same diff --git a/ceph/qa/suites/rgw/upgrade/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/upgrade/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/upgrade/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/rgw/verify/tasks/bucket-check.yaml b/ceph/qa/suites/rgw/verify/tasks/bucket-check.yaml new file mode 100644 index 000000000..4955d41c6 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/tasks/bucket-check.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rgw/run-bucket-check.sh diff --git a/ceph/qa/suites/rgw/verify/tasks/mp_reupload.yaml b/ceph/qa/suites/rgw/verify/tasks/mp_reupload.yaml new file mode 100644 index 000000000..d817a1c35 --- /dev/null +++ b/ceph/qa/suites/rgw/verify/tasks/mp_reupload.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rgw/test_rgw_s3_mp_reupload.sh diff --git a/ceph/qa/suites/rgw/website/ignore-pg-availability.yaml b/ceph/qa/suites/rgw/website/ignore-pg-availability.yaml new file mode 120000 index 000000000..32340b1fa --- /dev/null +++ b/ceph/qa/suites/rgw/website/ignore-pg-availability.yaml @@ -0,0 +1 @@ +.qa/rgw/ignore-pg-availability.yaml \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/supported-all-distro b/ceph/qa/suites/smoke/basic/supported-all-distro new file mode 120000 index 000000000..ca82dde58 --- /dev/null +++ b/ceph/qa/suites/smoke/basic/supported-all-distro @@ -0,0 +1 @@ +.qa/distros/supported-all-distro \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/supported-random-distro$ b/ceph/qa/suites/smoke/basic/supported-random-distro$ 
deleted file mode 120000 index 072fd8385..000000000 --- a/ceph/qa/suites/smoke/basic/supported-random-distro$ +++ /dev/null @@ -1 +0,0 @@ -../../../distros/supported-random-distro$/ \ No newline at end of file diff --git a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_blogbench.yaml b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_blogbench.yaml index 0e51ed0e4..bc40416ff 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_blogbench.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_blogbench.yaml @@ -1,6 +1,8 @@ tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - ceph-fuse: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_fsstress.yaml b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_fsstress.yaml index 8f0bc592c..e21286d59 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_fsstress.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_fsstress.yaml @@ -1,5 +1,7 @@ tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - ceph-fuse: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_iozone.yaml b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_iozone.yaml index fedc23081..871606ab8 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_iozone.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_iozone.yaml @@ -1,5 +1,7 @@ tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - ceph-fuse: [client.0] - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_pjd.yaml b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_pjd.yaml index 03501ecd3..0f4469c93 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_pjd.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_pjd.yaml @@ -1,6 +1,8 @@ tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) conf: mds: debug mds: 20 diff --git a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_direct_io.yaml b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_direct_io.yaml index 6d3ccdf05..3720d418c 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_direct_io.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_direct_io.yaml @@ -5,6 +5,8 @@ overrides: ms die on skipped message: false tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - kclient: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_dbench.yaml b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_dbench.yaml index 2a9b991a9..256d1f1fe 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_dbench.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_dbench.yaml @@ -6,6 +6,8 @@ overrides: tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - kclient: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_fsstress.yaml b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_fsstress.yaml index 96a7a215d..649ea8e14 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_fsstress.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_fsstress.yaml @@ -6,6 +6,8 @@ overrides: tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - kclient: - workunit: clients: 
diff --git a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_pjd.yaml b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_pjd.yaml index 60723a416..7dea45b80 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_pjd.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_pjd.yaml @@ -6,6 +6,8 @@ overrides: tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - kclient: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/libcephfs_interface_tests.yaml b/ceph/qa/suites/smoke/basic/tasks/test/libcephfs_interface_tests.yaml index 2e332f013..3be975b6b 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/libcephfs_interface_tests.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/libcephfs_interface_tests.yaml @@ -9,6 +9,8 @@ overrides: debug mds: 20 tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - ceph-fuse: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/rados_cls_all.yaml b/ceph/qa/suites/smoke/basic/tasks/test/rados_cls_all.yaml index 07e28a7a4..c4d55728c 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/rados_cls_all.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/rados_cls_all.yaml @@ -7,6 +7,8 @@ overrides: tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - workunit: clients: client.0: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/rbd_cli_import_export.yaml b/ceph/qa/suites/smoke/basic/tasks/test/rbd_cli_import_export.yaml index 9c2ad68a3..79ff9418d 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/rbd_cli_import_export.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/rbd_cli_import_export.yaml @@ -1,6 +1,8 @@ tasks: - ceph: fs: xfs + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - ceph-fuse: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/rbd_python_api_tests.yaml b/ceph/qa/suites/smoke/basic/tasks/test/rbd_python_api_tests.yaml index a3cfaa2bc..73e64bb2c 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/rbd_python_api_tests.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/rbd_python_api_tests.yaml @@ -5,6 +5,8 @@ overrides: - python3-pytest tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - ceph-fuse: - workunit: clients: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/rbd_workunit_suites_iozone.yaml b/ceph/qa/suites/smoke/basic/tasks/test/rbd_workunit_suites_iozone.yaml index 1cbaebc89..8602447aa 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/rbd_workunit_suites_iozone.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/rbd_workunit_suites_iozone.yaml @@ -7,6 +7,8 @@ overrides: rbd default features: 5 tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - rbd: all: image_size: 20480 diff --git a/ceph/qa/suites/smoke/basic/tasks/test/rgw_ec_s3tests.yaml b/ceph/qa/suites/smoke/basic/tasks/test/rgw_ec_s3tests.yaml index c914886d6..3214fd900 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/rgw_ec_s3tests.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/rgw_ec_s3tests.yaml @@ -4,6 +4,8 @@ overrides: cache-pools: true tasks: - ceph: + log-ignorelist: + - \(POOL_APP_NOT_ENABLED\) - rgw: [client.0] - tox: [client.0] - s3tests: diff --git a/ceph/qa/suites/smoke/basic/tasks/test/rgw_s3tests.yaml b/ceph/qa/suites/smoke/basic/tasks/test/rgw_s3tests.yaml index 23039e51c..337452f75 100644 --- a/ceph/qa/suites/smoke/basic/tasks/test/rgw_s3tests.yaml +++ b/ceph/qa/suites/smoke/basic/tasks/test/rgw_s3tests.yaml @@ -1,6 +1,8 @@ tasks: - ceph: fs: xfs + log-ignorelist: + - 
\(POOL_APP_NOT_ENABLED\) - rgw: [client.0] - tox: [client.0] - s3tests: diff --git a/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_api.yaml b/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_api.yaml index 6e67e9cf1..6a0242b91 100644 --- a/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_api.yaml +++ b/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_api.yaml @@ -9,4 +9,6 @@ workload: clients: client.0: - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" - print: "**** done end test_rbd_api.yaml" diff --git a/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_python.yaml b/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_python.yaml index 59ed40474..8d1f0fd0f 100644 --- a/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_python.yaml +++ b/ceph/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_python.yaml @@ -14,5 +14,7 @@ workload: clients: client.0: - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "61" - print: "**** done end test_rbd_python.yaml" diff --git a/ceph/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd_api.yaml b/ceph/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd_api.yaml index 400cc5f7d..a7060c0ac 100644 --- a/ceph/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd_api.yaml +++ b/ceph/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd_api.yaml @@ -7,4 +7,6 @@ first-half-tasks: clients: client.0: - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" - print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/ceph/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd_api.yaml b/ceph/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd_api.yaml index a3f5c9e39..8b52658c4 100644 --- a/ceph/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd_api.yaml +++ b/ceph/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd_api.yaml @@ -7,4 +7,6 @@ stress-tasks: clients: client.0: - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" - print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/ceph/qa/suites/upgrade/quincy-x/filestore-remove-check/0-cluster/start.yaml b/ceph/qa/suites/upgrade/quincy-x/filestore-remove-check/0-cluster/start.yaml index 4663e2527..b4b6f4d90 100644 --- a/ceph/qa/suites/upgrade/quincy-x/filestore-remove-check/0-cluster/start.yaml +++ b/ceph/qa/suites/upgrade/quincy-x/filestore-remove-check/0-cluster/start.yaml @@ -19,6 +19,7 @@ overrides: - \(MGR_DOWN\) - slow request - \(MON_MSGR2_NOT_ENABLED\) + - \(POOL_APP_NOT_ENABLED\) conf: global: enable experimental unrecoverable data corrupting features: "*" diff --git a/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_api.yaml b/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_api.yaml index d80983135..c871d4c8c 100644 --- a/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_api.yaml +++ b/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_api.yaml @@ -9,4 +9,6 @@ workload: clients: client.0: - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" - print: "**** done end test_rbd_api.yaml" diff --git a/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_python.yaml b/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_python.yaml index c30560895..3ae98ed1e 100644 --- a/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_python.yaml +++ b/ceph/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_python.yaml @@ -14,5 +14,7 @@ workload: clients: client.0: - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "61" - 
print: "**** done end test_rbd_python.yaml" diff --git a/ceph/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd_api.yaml b/ceph/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd_api.yaml index 87b89ec19..0fa5d0944 100644 --- a/ceph/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd_api.yaml +++ b/ceph/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd_api.yaml @@ -7,4 +7,6 @@ first-half-tasks: clients: client.0: - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" - print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/ceph/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd_api.yaml b/ceph/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd_api.yaml index 1d4714ab4..cc4f29a08 100644 --- a/ceph/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd_api.yaml +++ b/ceph/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd_api.yaml @@ -7,4 +7,6 @@ stress-tasks: clients: client.0: - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" - print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/ceph/qa/tasks/barbican.py b/ceph/qa/tasks/barbican.py index 70bd73405..771304fba 100644 --- a/ceph/qa/tasks/barbican.py +++ b/ceph/qa/tasks/barbican.py @@ -136,9 +136,8 @@ def set_authtoken_params(ctx, cclient, cconfig): ['sed', '-i', '/[[]filter:authtoken]/{p;s##'+'auth_uri = {}'.format(url)+'#;}', 'etc/barbican/barbican-api-paste.ini']) - admin_host, admin_port = ctx.keystone.admin_endpoints[keystone_role] - admin_url = 'http://{host}:{port}/v3'.format(host=admin_host, - port=admin_port) + admin_url = 'http://{host}:{port}/v3'.format(host=public_host, + port=public_port) run_in_barbican_dir(ctx, cclient, ['sed', '-i', '/[[]filter:authtoken]/{p;s##'+'auth_url = {}'.format(admin_url)+'#;}', diff --git a/ceph/qa/tasks/cephadm.py b/ceph/qa/tasks/cephadm.py index 84685b824..e9fc25d6a 100644 --- a/ceph/qa/tasks/cephadm.py +++ b/ceph/qa/tasks/cephadm.py @@ -441,6 +441,70 @@ def pull_image(ctx, config): finally: pass +@contextlib.contextmanager +def setup_ca_signed_keys(ctx, config): + # generate our ca key + cluster_name = config['cluster'] + bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote + bootstrap_remote.run(args=[ + 'sudo', 'ssh-keygen', '-t', 'rsa', '-f', '/root/ca-key', '-N', '' + ]) + + # not using read_file here because it runs dd as a non-root + # user and would hit permission issues + r = bootstrap_remote.run(args=[ + 'sudo', 'cat', '/root/ca-key.pub' + ], stdout=StringIO()) + ca_key_pub_contents = r.stdout.getvalue() + + # make CA key accepted on each host + for remote in ctx.cluster.remotes.keys(): + # write key to each host's /etc/ssh dir + remote.run(args=[ + 'sudo', 'echo', ca_key_pub_contents, + run.Raw('|'), + 'sudo', 'tee', '-a', '/etc/ssh/ca-key.pub', + ]) + # make sshd accept the CA signed key + remote.run(args=[ + 'sudo', 'echo', 'TrustedUserCAKeys /etc/ssh/ca-key.pub', + run.Raw('|'), + 'sudo', 'tee', '-a', '/etc/ssh/sshd_config', + run.Raw('&&'), + 'sudo', 'systemctl', 'restart', 'sshd', + ]) + + # generate a new key pair and sign the pub key to make a cert + bootstrap_remote.run(args=[ + 'sudo', 'ssh-keygen', '-t', 'rsa', '-f', '/root/cephadm-ssh-key', '-N', '', + run.Raw('&&'), + 'sudo', 'ssh-keygen', '-s', '/root/ca-key', '-I', 'user_root', '-n', 'root', '-V', '+52w', '/root/cephadm-ssh-key', + ]) + + # for debugging, to make sure this setup has worked as intended + for remote in ctx.cluster.remotes.keys(): + remote.run(args=[ + 'sudo', 'cat', '/etc/ssh/ca-key.pub' + ]) + remote.run(args=[ + 
'sudo', 'cat', '/etc/ssh/sshd_config', + run.Raw('|'), + 'grep', 'TrustedUserCAKeys' + ]) + bootstrap_remote.run(args=[ + 'sudo', 'ls', '/root/' + ]) + + ctx.ca_signed_key_info = {} + ctx.ca_signed_key_info['ca-key'] = '/root/ca-key' + ctx.ca_signed_key_info['ca-key-pub'] = '/root/ca-key.pub' + ctx.ca_signed_key_info['private-key'] = '/root/cephadm-ssh-key' + ctx.ca_signed_key_info['ca-signed-cert'] = '/root/cephadm-ssh-key-cert.pub' + + try: + yield + finally: + pass @contextlib.contextmanager def ceph_bootstrap(ctx, config): @@ -510,8 +574,20 @@ def ceph_bootstrap(ctx, config): '--output-config', '/etc/ceph/{}.conf'.format(cluster_name), '--output-keyring', '/etc/ceph/{}.client.admin.keyring'.format(cluster_name), - '--output-pub-ssh-key', '{}/{}.pub'.format(testdir, cluster_name), ] + + if not config.get("use-ca-signed-key", False): + cmd += ['--output-pub-ssh-key', '{}/{}.pub'.format(testdir, cluster_name)] + else: + # ctx.ca_signed_key_info should have been set up in + # setup_ca_signed_keys function which we expect to have + # run before bootstrap if use-ca-signed-key is true + signed_key_info = ctx.ca_signed_key_info + cmd += [ + "--ssh-private-key", signed_key_info['private-key'], + "--ssh-signed-cert", signed_key_info['ca-signed-cert'], + ] + if config.get("no_cgroups_split") is True: cmd.insert(cmd.index("bootstrap"), "--no-cgroups-split") @@ -562,21 +638,22 @@ def ceph_bootstrap(ctx, config): ctx.ceph[cluster_name].mon_keyring = \ bootstrap_remote.read_file(f'/var/lib/ceph/{fsid}/mon.{first_mon}/keyring', sudo=True) - # fetch ssh key, distribute to additional nodes - log.info('Fetching pub ssh key...') - ssh_pub_key = bootstrap_remote.read_file( - f'{testdir}/{cluster_name}.pub').decode('ascii').strip() + if not config.get("use-ca-signed-key", False): + # fetch ssh key, distribute to additional nodes + log.info('Fetching pub ssh key...') + ssh_pub_key = bootstrap_remote.read_file( + f'{testdir}/{cluster_name}.pub').decode('ascii').strip() - log.info('Installing pub ssh key for root users...') - ctx.cluster.run(args=[ - 'sudo', 'install', '-d', '-m', '0700', '/root/.ssh', - run.Raw('&&'), - 'echo', ssh_pub_key, - run.Raw('|'), - 'sudo', 'tee', '-a', '/root/.ssh/authorized_keys', - run.Raw('&&'), - 'sudo', 'chmod', '0600', '/root/.ssh/authorized_keys', - ]) + log.info('Installing pub ssh key for root users...') + ctx.cluster.run(args=[ + 'sudo', 'install', '-d', '-m', '0700', '/root/.ssh', + run.Raw('&&'), + 'echo', ssh_pub_key, + run.Raw('|'), + 'sudo', 'tee', '-a', '/root/.ssh/authorized_keys', + run.Raw('&&'), + 'sudo', 'chmod', '0600', '/root/.ssh/authorized_keys', + ]) # set options if config.get('allow_ptrace', True): @@ -1636,16 +1713,18 @@ def task(ctx, config): with contextutil.nested( #if the cluster is already bootstrapped bypass corresponding methods - lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\ + lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped) \ else initialize_config(ctx=ctx, config=config), lambda: ceph_initial(), lambda: normalize_hostnames(ctx=ctx), - lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\ + lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped) \ else download_cephadm(ctx=ctx, config=config, ref=ref), lambda: ceph_log(ctx=ctx, config=config), lambda: ceph_crash(ctx=ctx, config=config), lambda: pull_image(ctx=ctx, config=config), - lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\ + lambda: _bypass() if not (config.get('use-ca-signed-key', False)) \ + else setup_ca_signed_keys(ctx, config), + lambda: 
_bypass() if (ctx.ceph[cluster_name].bootstrapped) \ else ceph_bootstrap(ctx, config), lambda: crush_setup(ctx=ctx, config=config), lambda: ceph_mons(ctx=ctx, config=config), diff --git a/ceph/qa/tasks/cephfs/cephfs_test_case.py b/ceph/qa/tasks/cephfs/cephfs_test_case.py index 129b84790..d2688929c 100644 --- a/ceph/qa/tasks/cephfs/cephfs_test_case.py +++ b/ceph/qa/tasks/cephfs/cephfs_test_case.py @@ -13,6 +13,12 @@ from teuthology.exceptions import CommandFailedError log = logging.getLogger(__name__) +def classhook(m): + def dec(cls): + getattr(cls, m)() + return cls + return dec + def for_teuthology(f): """ Decorator that adds an "is_for_teuthology" attribute to the wrapped function diff --git a/ceph/qa/tasks/cephfs/kernel_mount.py b/ceph/qa/tasks/cephfs/kernel_mount.py index 750b6b533..89f6b6639 100644 --- a/ceph/qa/tasks/cephfs/kernel_mount.py +++ b/ceph/qa/tasks/cephfs/kernel_mount.py @@ -68,7 +68,10 @@ class KernelMount(CephFSMount): self.enable_dynamic_debug() self.ctx[f'kmount_count.{self.client_remote.hostname}'] = kmount_count + 1 - self.gather_mount_info() + try: + self.gather_mount_info() + except: + log.warn('failed to fetch mount info - tests depending on mount addr/inst may fail!') def gather_mount_info(self): self.id = self._get_global_id() diff --git a/ceph/qa/tasks/cephfs/mount.py b/ceph/qa/tasks/cephfs/mount.py index 23aa2f837..4a8187406 100644 --- a/ceph/qa/tasks/cephfs/mount.py +++ b/ceph/qa/tasks/cephfs/mount.py @@ -186,6 +186,12 @@ class CephFSMount(object): sudo=True).decode()) def is_blocked(self): + if not self.addr: + # can't infer if our addr is blocklisted - let the caller try to + # umount without lazy/force. If the client was blocklisted, then + # the umount would be stuck and the test would fail on timeout. + # happens only with Ubuntu 20.04 (missing kclient patches :/). + return False self.fs = Filesystem(self.ctx, name=self.cephfs_name) try: @@ -935,7 +941,7 @@ class CephFSMount(object): )) p.wait() - def open_background(self, basename="background_file", write=True): + def open_background(self, basename="background_file", write=True, content="content"): """ Open a file for writing, then block such that the client will hold a capability. @@ -952,12 +958,11 @@ class CephFSMount(object): import time with open("{path}", 'w') as f: - f.write('content') + f.write("{content}") f.flush() - f.write('content2') while True: time.sleep(1) - """).format(path=path) + """).format(path=path, content=content) else: pyscript = dedent(""" import time @@ -973,7 +978,10 @@ class CephFSMount(object): # This wait would not be sufficient if the file had already # existed, but it's simple and in practice users of open_background # are not using it on existing files. 
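A rough standalone sketch of the check the size-aware wait_for_visible() change below performs, assuming a plain local path stands in for the client mountpoint; the helper name and path are illustrative only:

    import os
    import time

    def wait_for_size(path, size, timeout=30):
        # poll, as the helper does with `stat --printf=%s`, until the file
        # exists and has grown to the expected length
        for elapsed in range(timeout):
            try:
                if os.stat(path).st_size == size:
                    return elapsed
            except FileNotFoundError:
                pass
            time.sleep(1)
        raise RuntimeError(f"{path} did not reach {size} bytes within {timeout}s")
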
- self.wait_for_visible(basename) + if write: + self.wait_for_visible(basename, size=len(content)) + else: + self.wait_for_visible(basename) return rproc @@ -1011,19 +1019,27 @@ class CephFSMount(object): if nr_links == 2: return - def wait_for_visible(self, basename="background_file", timeout=30): + def wait_for_visible(self, basename="background_file", size=None, timeout=30): i = 0 + args = ['stat'] + if size is not None: + args += ['--printf=%s'] + args += [os.path.join(self.hostfs_mntpt, basename)] while i < timeout: - r = self.client_remote.run(args=[ - 'stat', os.path.join(self.hostfs_mntpt, basename) - ], check_status=False) - if r.exitstatus == 0: - log.debug("File {0} became visible from {1} after {2}s".format( - basename, self.client_id, i)) - return - else: - time.sleep(1) - i += 1 + p = self.client_remote.run(args=args, stdout=StringIO(), check_status=False) + if p.exitstatus == 0: + if size is not None: + s = p.stdout.getvalue().strip() + if int(s) == size: + log.info(f"File {basename} became visible with size {size} from {self.client_id} after {i}s") + return + else: + log.error(f"File {basename} became visible but with size {int(s)} not {size}") + else: + log.info(f"File {basename} became visible from {self.client_id} after {i}s") + return + time.sleep(1) + i += 1 raise RuntimeError("Timed out after {0}s waiting for {1} to become visible from {2}".format( i, basename, self.client_id)) @@ -1229,7 +1245,8 @@ class CephFSMount(object): self.background_procs.append(rproc) return rproc - def create_n_files(self, fs_path, count, sync=False, dirsync=False, unlink=False, finaldirsync=False): + def create_n_files(self, fs_path, count, sync=False, dirsync=False, + unlink=False, finaldirsync=False, hard_links=0): """ Create n files. @@ -1237,6 +1254,7 @@ class CephFSMount(object): :param dirsync: sync the containing directory after closing the file :param unlink: unlink the file after closing :param finaldirsync: sync the containing directory after closing the last file + :param hard_links: create given number of hard link(s) for each file """ assert(self.is_mounted()) @@ -1245,8 +1263,12 @@ class CephFSMount(object): pyscript = dedent(f""" import os + import uuid n = {count} + create_hard_links = False + if {hard_links} > 0: + create_hard_links = True path = "{abs_path}" dpath = os.path.dirname(path) @@ -1267,6 +1289,9 @@ class CephFSMount(object): os.unlink(fpath) if {dirsync}: os.fsync(dirfd) + if create_hard_links: + for j in range({hard_links}): + os.system(f"ln {{fpath}} {{dpath}}/{{fnameprefix}}_{{i}}_{{uuid.uuid4()}}") if {finaldirsync}: os.fsync(dirfd) finally: diff --git a/ceph/qa/tasks/cephfs/test_admin.py b/ceph/qa/tasks/cephfs/test_admin.py index e6ad57b5c..9890381c6 100644 --- a/ceph/qa/tasks/cephfs/test_admin.py +++ b/ceph/qa/tasks/cephfs/test_admin.py @@ -8,7 +8,7 @@ from os.path import join as os_path_join from teuthology.exceptions import CommandFailedError -from tasks.cephfs.cephfs_test_case import CephFSTestCase +from tasks.cephfs.cephfs_test_case import CephFSTestCase, classhook from tasks.cephfs.filesystem import FileLayout, FSMissing from tasks.cephfs.fuse_mount import FuseMount from tasks.cephfs.caps_helper import CapTester @@ -37,6 +37,34 @@ class TestAdminCommands(CephFSTestCase): if overwrites: self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true') +@classhook('_add_valid_tell') +class TestValidTell(TestAdminCommands): + @classmethod + def _add_valid_tell(cls): + tells = [ + ['cache', 'status'], + ['damage', 'ls'], 
+ ['dump_blocked_ops'], + ['dump_blocked_ops_count'], + ['dump_historic_ops'], + ['dump_historic_ops_by_duration'], + ['dump_mempools'], + ['dump_ops_in_flight'], + ['flush', 'journal'], + ['get', 'subtrees'], + ['ops', 'locks'], + ['ops'], + ['status'], + ['version'], + ] + def test(c): + def f(self): + J = self.fs.rank_tell(c) + json.dumps(J) + log.debug("dumped:\n%s", str(J)) + return f + for c in tells: + setattr(cls, 'test_valid_' + '_'.join(c), test(c)) class TestFsStatus(TestAdminCommands): """ diff --git a/ceph/qa/tasks/cephfs/test_client_limits.py b/ceph/qa/tasks/cephfs/test_client_limits.py index 93484c9aa..c4215df33 100644 --- a/ceph/qa/tasks/cephfs/test_client_limits.py +++ b/ceph/qa/tasks/cephfs/test_client_limits.py @@ -9,7 +9,9 @@ from textwrap import dedent from tasks.ceph_test_case import TestTimeoutError from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming from tasks.cephfs.fuse_mount import FuseMount +from teuthology.exceptions import CommandFailedError import os +from io import StringIO log = logging.getLogger(__name__) @@ -156,29 +158,49 @@ class TestClientLimits(CephFSTestCase): a fraction of second (0.5) by default when throttling condition is met. """ - max_caps_per_client = 500 - cap_acquisition_throttle = 250 + subdir_count = 4 + files_per_dir = 25 - self.config_set('mds', 'mds_max_caps_per_client', max_caps_per_client) - self.config_set('mds', 'mds_session_cap_acquisition_throttle', cap_acquisition_throttle) + # throttle in a way so that two dir reads are already hitting it. + throttle_value = (files_per_dir * 3) // 2 - # Create 1500 files split across 6 directories, 250 each. - for i in range(1, 7): - self.mount_a.create_n_files("dir{0}/file".format(i), cap_acquisition_throttle, sync=True) + # activate throttling logic by setting max per client to a low value + self.config_set('mds', 'mds_max_caps_per_client', 1) + self.config_set('mds', 'mds_session_cap_acquisition_throttle', throttle_value) - mount_a_client_id = self.mount_a.get_global_id() + # Create files split across {subdir_count} directories, {per_dir_count} in each dir + for i in range(1, subdir_count+1): + self.mount_a.create_n_files("dir{0}/file".format(i), files_per_dir, sync=True) - # recursive readdir - self.mount_a.run_shell_payload("find | wc") + mount_a_client_id = self.mount_a.get_global_id() - # validate cap_acquisition decay counter after readdir to exceed throttle count i.e 250 - cap_acquisition_value = self.get_session(mount_a_client_id)['cap_acquisition']['value'] - self.assertGreaterEqual(cap_acquisition_value, cap_acquisition_throttle) + # recursive readdir. macOs wants an explicit directory for `find`. + proc = self.mount_a.run_shell_payload("find . | wc", stderr=StringIO()) + # return code may be None if the command got interrupted + self.assertTrue(proc.returncode is None or proc.returncode == 0, proc.stderr.getvalue()) # validate the throttle condition to be hit atleast once cap_acquisition_throttle_hit_count = self.perf_dump()['mds_server']['cap_acquisition_throttle'] self.assertGreaterEqual(cap_acquisition_throttle_hit_count, 1) + # validate cap_acquisition decay counter after readdir to NOT exceed the throttle value + # plus one batch that could have been taken immediately before querying + # assuming the batch is equal to the per dir file count. 
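The bound asserted just below follows from the values chosen earlier in this test; a worked version of the arithmetic, using only the numbers set there:

    files_per_dir = 25
    throttle_value = (files_per_dir * 3) // 2     # 37: two directory reads (50 caps) already hit it
    upper_bound = files_per_dir + throttle_value  # 62: ceiling allowed for the cap_acquisition counter
    assert 2 * files_per_dir > throttle_value
    assert upper_bound == 62
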
+ cap_acquisition_value = self.get_session(mount_a_client_id)['cap_acquisition']['value'] + self.assertLessEqual(cap_acquisition_value, files_per_dir + throttle_value) + + # make sure that the throttle was reported in the events + def historic_ops_have_event(expected_event): + ops_dump = self.fs.rank_tell(['dump_historic_ops']) + # reverse the events and the ops assuming that later ops would be throttled + for op in reversed(ops_dump['ops']): + for ev in reversed(op.get('type_data', {}).get('events', [])): + if ev['event'] == expected_event: + return True + return False + + self.assertTrue(historic_ops_have_event('cap_acquisition_throttle')) + def test_client_release_bug(self): """ When a client has a bug (which we will simulate) preventing it from releasing caps, @@ -218,6 +240,55 @@ class TestClientLimits(CephFSTestCase): self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id]) rproc.wait() + def test_client_blocklisted_oldest_tid(self): + """ + that a client is blocklisted when its encoded session metadata exceeds the + configured threshold (due to ever growing `completed_requests` caused due + to an unidentified bug (in the client or the MDS)). + """ + + # num of requests client issues + max_requests = 10000 + + # The debug hook to inject the failure only exists in the fuse client + if not isinstance(self.mount_a, FuseMount): + self.skipTest("Require FUSE client to inject client release failure") + + self.config_set('client', 'client inject fixed oldest tid', 'true') + self.mount_a.teardown() + self.mount_a.mount_wait() + + self.config_set('mds', 'mds_max_completed_requests', max_requests); + + # Create lots of files + self.mount_a.create_n_files("testdir/file1", max_requests + 100) + + # Create a few files synchronously. This makes sure previous requests are completed + self.mount_a.create_n_files("testdir/file2", 5, True) + + # Wait for the health warnings. Assume mds can handle 10 request per second at least + self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10, check_in_detail=str(self.mount_a.client_id)) + + # set the threshold low so that it has a high probability of + # hitting. + self.config_set('mds', 'mds_session_metadata_threshold', 5000); + + # Create lot many files synchronously. This would hit the session metadata threshold + # causing the client to get blocklisted. 
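A worked version of the waits used here, assuming (as the comment above does) that the MDS completes at least 10 requests per second; the variable names are illustrative only:

    max_requests = 10000
    health_warn_timeout = max_requests // 10   # 1000s allowed above for the MDS_CLIENT_OLDEST_TID warning
    metadata_threshold = 5000                  # the mds_session_metadata_threshold value set above
    print(health_warn_timeout, metadata_threshold)
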
+ with self.assertRaises(CommandFailedError): + self.mount_a.create_n_files("testdir/file2", 100000, True) + + self.mds_cluster.is_addr_blocklisted(self.mount_a.get_global_addr()) + # the mds should bump up the relevant perf counter + pd = self.perf_dump() + self.assertGreater(pd['mds_sessions']['mdthresh_evicted'], 0) + + # reset the config + self.config_set('client', 'client inject fixed oldest tid', 'false') + + self.mount_a.kill_cleanup() + self.mount_a.mount_wait() + def test_client_oldest_tid(self): """ When a client does not advance its oldest tid, the MDS should notice that diff --git a/ceph/qa/tasks/cephfs/test_client_recovery.py b/ceph/qa/tasks/cephfs/test_client_recovery.py index f8a50ad37..1bd6884a9 100644 --- a/ceph/qa/tasks/cephfs/test_client_recovery.py +++ b/ceph/qa/tasks/cephfs/test_client_recovery.py @@ -7,7 +7,9 @@ import logging from textwrap import dedent import time import distutils.version as version +import random import re +import string import os from teuthology.orchestra import run @@ -217,8 +219,10 @@ class TestClientRecovery(CephFSTestCase): # Capability release from stale session # ===================================== if write: - cap_holder = self.mount_a.open_background() + content = ''.join(random.choices(string.ascii_uppercase + string.digits, k=16)) + cap_holder = self.mount_a.open_background(content=content) else: + content = '' self.mount_a.run_shell(["touch", "background_file"]) self.mount_a.umount_wait() self.mount_a.mount_wait() @@ -229,7 +233,7 @@ class TestClientRecovery(CephFSTestCase): # Wait for the file to be visible from another client, indicating # that mount_a has completed its network ops - self.mount_b.wait_for_visible() + self.mount_b.wait_for_visible(size=len(content)) # Simulate client death self.mount_a.suspend_netns() @@ -260,11 +264,9 @@ class TestClientRecovery(CephFSTestCase): "Capability handover took {0}, expected approx {1}".format( cap_waited, session_timeout )) - - self.mount_a._kill_background(cap_holder) finally: - # teardown() doesn't quite handle this case cleanly, so help it out - self.mount_a.resume_netns() + self.mount_a.resume_netns() # allow the mount to recover otherwise background proc is unkillable + self.mount_a._kill_background(cap_holder) def test_stale_read_caps(self): self._test_stale_caps(False) @@ -315,9 +317,9 @@ class TestClientRecovery(CephFSTestCase): cap_waited, session_timeout / 2.0 )) - self.mount_a._kill_background(cap_holder) finally: - self.mount_a.resume_netns() + self.mount_a.resume_netns() # allow the mount to recover otherwise background proc is unkillable + self.mount_a._kill_background(cap_holder) def test_trim_caps(self): # Trim capability when reconnecting MDS @@ -383,7 +385,6 @@ class TestClientRecovery(CephFSTestCase): self.mount_b.check_filelock(do_flock=flockable) - # Tear down the background process self.mount_a._kill_background(lock_holder) def test_filelock_eviction(self): @@ -412,7 +413,6 @@ class TestClientRecovery(CephFSTestCase): # succeed self.wait_until_true(lambda: lock_taker.finished, timeout=10) finally: - # Tear down the background process self.mount_a._kill_background(lock_holder) # teardown() doesn't quite handle this case cleanly, so help it out diff --git a/ceph/qa/tasks/cephfs/test_damage.py b/ceph/qa/tasks/cephfs/test_damage.py index d83187017..bfaa23453 100644 --- a/ceph/qa/tasks/cephfs/test_damage.py +++ b/ceph/qa/tasks/cephfs/test_damage.py @@ -608,8 +608,9 @@ class TestDamage(CephFSTestCase): self.fs.flush() self.config_set("mds", 
"mds_inject_rename_corrupt_dentry_first", "1.0") time.sleep(5) # for conf to percolate - p = self.mount_a.run_shell_payload("timeout 60 mv a/b a/z", wait=False) - self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout) + with self.assert_cluster_log("MDS abort because newly corrupt dentry"): + p = self.mount_a.run_shell_payload("timeout 60 mv a/b a/z", wait=False) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout) self.config_rm("mds", "mds_inject_rename_corrupt_dentry_first") self.fs.rank_freeze(False, rank=0) self.delete_mds_coredump(rank0['name']) @@ -642,9 +643,10 @@ class TestDamage(CephFSTestCase): rank0 = self.fs.get_rank() self.fs.rank_freeze(True, rank=0) # so now we want to trigger commit but this will crash, so: - c = ['--connect-timeout=60', 'tell', f"mds.{fscid}:0", "flush", "journal"] - p = self.ceph_cluster.mon_manager.run_cluster_cmd(args=c, wait=False, timeoutcmd=30) - self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout) + with self.assert_cluster_log("MDS abort because newly corrupt dentry"): + c = ['--connect-timeout=60', 'tell', f"mds.{fscid}:0", "flush", "journal"] + p = self.ceph_cluster.mon_manager.run_cluster_cmd(args=c, wait=False, timeoutcmd=30) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(), timeout=self.fs.beacon_timeout) self.config_rm("mds", "mds_inject_journal_corrupt_dentry_first") self.fs.rank_freeze(False, rank=0) self.delete_mds_coredump(rank0['name']) diff --git a/ceph/qa/tasks/cephfs/test_failover.py b/ceph/qa/tasks/cephfs/test_failover.py index 6149c6bd6..ddcc58ccc 100644 --- a/ceph/qa/tasks/cephfs/test_failover.py +++ b/ceph/qa/tasks/cephfs/test_failover.py @@ -14,9 +14,12 @@ class TestClusterAffinity(CephFSTestCase): CLIENTS_REQUIRED = 0 MDSS_REQUIRED = 4 - def _verify_join_fs(self, target, status=None): + def _verify_join_fs(self, target, status=None, fs=None): + fs_select = fs + if fs_select is None: + fs_select = self.fs if status is None: - status = self.fs.wait_for_daemons(timeout=30) + status = fs_select.wait_for_daemons(timeout=30) log.debug("%s", status) target = sorted(target, key=operator.itemgetter('name')) log.info("target = %s", target) @@ -37,11 +40,14 @@ class TestClusterAffinity(CephFSTestCase): return self.fail("no entity") - def _verify_init(self): - status = self.fs.status() + def _verify_init(self, fs=None): + fs_select = fs + if fs_select is None: + fs_select = self.fs + status = fs_select.status() log.info("status = {0}".format(status)) target = [{'join_fscid': -1, 'name': info['name']} for info in status.get_all()] - self._verify_join_fs(target, status=status) + self._verify_join_fs(target, status=status, fs=fs_select) return (status, target) def _reach_target(self, target): @@ -76,7 +82,8 @@ class TestClusterAffinity(CephFSTestCase): self._change_target_state(target, names[0], {'join_fscid': self.fs.id}) self._change_target_state(target, names[1], {'join_fscid': self.fs.id}) self._reach_target(target) - status = self.fs.status() + time.sleep(5) # MDSMonitor tick + status = self.fs.wait_for_daemons() active = self.fs.get_active_names(status=status)[0] self.assertIn(active, names) self.config_rm('mds.'+active, 'mds_join_fs') @@ -106,12 +113,21 @@ class TestClusterAffinity(CephFSTestCase): fs2 = self.mds_cluster.newfs(name="cephfs2") status, target = self._verify_init() active = self.fs.get_active_names(status=status)[0] + status2, _ = self._verify_init(fs=fs2) + active2 = 
fs2.get_active_names(status=status2)[0] standbys = [info['name'] for info in status.get_standbys()] victim = standbys.pop() # Set a bogus fs on the others for mds in standbys: self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2') self._change_target_state(target, mds, {'join_fscid': fs2.id}) + # The active MDS for cephfs2 will be replaced by the MDS for which + # file system affinity has been set. Also, set the affinity for + # the earlier active MDS so that it is not chosen by the monitors + # as an active MDS for the existing file system. + log.info(f'assigning affinity to cephfs2 for active mds (mds.{active2})') + self.config_set(f'mds.{active2}', 'mds_join_fs', 'cephfs2') + self._change_target_state(target, active2, {'join_fscid': fs2.id}) self.fs.rank_fail() self._change_target_state(target, victim, {'state': 'up:active'}) self._reach_target(target) diff --git a/ceph/qa/tasks/cephfs/test_mirroring.py b/ceph/qa/tasks/cephfs/test_mirroring.py index a5f8cdac7..c1a940e3f 100644 --- a/ceph/qa/tasks/cephfs/test_mirroring.py +++ b/ceph/qa/tasks/cephfs/test_mirroring.py @@ -1261,3 +1261,38 @@ class TestMirroring(CephFSTestCase): self.verify_snapshot('d2', 'snap0') self.disable_mirroring(self.primary_fs_name, self.primary_fs_id) + + def test_local_and_remote_dir_root_mode(self): + log.debug('reconfigure client auth caps') + cid = self.mount_b.client_id + data_pool = self.backup_fs.get_data_pool_name() + self.mds_cluster.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', f"client.{cid}", + 'mds', 'allow rw', + 'mon', 'allow r', + 'osd', f"allow rw pool={data_pool}, allow rw pool={data_pool}") + + log.debug(f'mounting filesystem {self.secondary_fs_name}') + self.mount_b.umount_wait() + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) + + self.mount_a.run_shell(["mkdir", "l1"]) + self.mount_a.run_shell(["mkdir", "l1/.snap/snap0"]) + self.mount_a.run_shell(["chmod", "go-rwx", "l1"]) + + self.enable_mirroring(self.primary_fs_name, self.primary_fs_id) + self.add_directory(self.primary_fs_name, self.primary_fs_id, '/l1') + self.peer_add(self.primary_fs_name, self.primary_fs_id, "client.mirror_remote@ceph", self.secondary_fs_name) + + time.sleep(60) + self.check_peer_status(self.primary_fs_name, self.primary_fs_id, + "client.mirror_remote@ceph", '/l1', 'snap0', 1) + + mode_local = self.mount_a.run_shell(["stat", "--format=%A", "l1"]).stdout.getvalue().strip() + mode_remote = self.mount_b.run_shell(["stat", "--format=%A", "l1"]).stdout.getvalue().strip() + + self.assertTrue(mode_local == mode_remote, f"mode mismatch, local mode: {mode_local}, remote mode: {mode_remote}") + + self.disable_mirroring(self.primary_fs_name, self.primary_fs_id) + self.mount_a.run_shell(["rmdir", "l1/.snap/snap0"]) + self.mount_a.run_shell(["rmdir", "l1"]) diff --git a/ceph/qa/tasks/cephfs/test_misc.py b/ceph/qa/tasks/cephfs/test_misc.py index 60e7c0514..8b48dee69 100644 --- a/ceph/qa/tasks/cephfs/test_misc.py +++ b/ceph/qa/tasks/cephfs/test_misc.py @@ -483,6 +483,29 @@ class TestMisc(CephFSTestCase): return self.assertTrue(False, "Failed to dump in-memory logs during missed internal heartbeat") + def _session_client_ls(self, cmd): + mount_a_client_id = self.mount_a.get_global_id() + info = self.fs.rank_asok(cmd) + mount_a_mountpoint = self.mount_a.mountpoint + mount_b_mountpoint = self.mount_b.mountpoint + self.assertIsNotNone(info) + for i in range(0, len(info)): + self.assertIn(info[i]["client_metadata"]["mount_point"], + [mount_a_mountpoint, mount_b_mountpoint]) + info = self.fs.rank_asok(cmd + 
[f"id={mount_a_client_id}"]) + self.assertEqual(len(info), 1) + self.assertEqual(info[0]["id"], mount_a_client_id) + self.assertEqual(info[0]["client_metadata"]["mount_point"], mount_a_mountpoint) + info = self.fs.rank_asok(cmd + ['--cap_dump']) + for i in range(0, len(info)): + self.assertIn("caps", info[i]) + + def test_session_ls(self): + self._session_client_ls(['session', 'ls']) + + def test_client_ls(self): + self._session_client_ls(['client', 'ls']) + class TestCacheDrop(CephFSTestCase): CLIENTS_REQUIRED = 1 diff --git a/ceph/qa/tasks/cephfs/test_scrub_checks.py b/ceph/qa/tasks/cephfs/test_scrub_checks.py index 0e84f7ed2..e41b997a6 100644 --- a/ceph/qa/tasks/cephfs/test_scrub_checks.py +++ b/ceph/qa/tasks/cephfs/test_scrub_checks.py @@ -296,6 +296,36 @@ class TestScrubChecks(CephFSTestCase): command = "flush_path /" self.asok_command(mds_rank, command, success_validator) + def scrub_with_stray_evaluation(self, fs, mnt, path, flag, files=2000, + _hard_links=3): + fs.set_allow_new_snaps(True) + + test_dir = "stray_eval_dir" + mnt.run_shell(["mkdir", test_dir]) + client_path = os.path.join(mnt.mountpoint, test_dir) + mnt.create_n_files(fs_path=f"{test_dir}/file", count=files, + hard_links=_hard_links) + mnt.run_shell(["mkdir", f"{client_path}/.snap/snap1-{test_dir}"]) + mnt.run_shell(f"find {client_path}/ -type f -delete") + mnt.run_shell(["rmdir", f"{client_path}/.snap/snap1-{test_dir}"]) + perf_dump = fs.rank_tell(["perf", "dump"], 0) + self.assertNotEqual(perf_dump.get('mds_cache').get('num_strays'), + 0, "mdcache.num_strays is zero") + + log.info( + f"num of strays: {perf_dump.get('mds_cache').get('num_strays')}") + + out_json = fs.run_scrub(["start", path, flag]) + self.assertNotEqual(out_json, None) + self.assertEqual(out_json["return_code"], 0) + + self.assertEqual( + fs.wait_until_scrub_complete(tag=out_json["scrub_tag"]), True) + + perf_dump = fs.rank_tell(["perf", "dump"], 0) + self.assertEqual(int(perf_dump.get('mds_cache').get('num_strays')), + 0, "mdcache.num_strays is non-zero") + def test_scrub_repair(self): mds_rank = 0 test_dir = "scrub_repair_path" @@ -332,6 +362,20 @@ class TestScrubChecks(CephFSTestCase): # fragstat should be fixed self.mount_a.run_shell(["rmdir", test_dir]) + def test_stray_evaluation_with_scrub(self): + """ + test that scrub can iterate over ~mdsdir and evaluate strays + """ + self.scrub_with_stray_evaluation(self.fs, self.mount_a, "~mdsdir", + "recursive") + + def test_flag_scrub_mdsdir(self): + """ + test flag scrub_mdsdir + """ + self.scrub_with_stray_evaluation(self.fs, self.mount_a, "/", + "recursive,scrub_mdsdir") + @staticmethod def json_validator(json_out, rc, element, expected_value): if rc != 0: diff --git a/ceph/qa/tasks/cephfs/test_snap_schedules.py b/ceph/qa/tasks/cephfs/test_snap_schedules.py index 736f49fe2..0264cac32 100644 --- a/ceph/qa/tasks/cephfs/test_snap_schedules.py +++ b/ceph/qa/tasks/cephfs/test_snap_schedules.py @@ -39,8 +39,9 @@ class TestSnapSchedulesHelper(CephFSTestCase): return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args) def fs_snap_schedule_cmd(self, *args, **kwargs): - fs = kwargs.pop('fs', self.volname) - args += ('--fs', fs) + if 'fs' in kwargs: + fs = kwargs.pop('fs') + args += ('--fs', fs) if 'format' in kwargs: fmt = kwargs.pop('format') args += ('--format', fmt) @@ -372,6 +373,9 @@ class TestSnapSchedules(TestSnapSchedulesHelper): snap_stats['fs_count'] = fs_count snap_stats['db_count'] = db_count + log.debug(f'fs_count: {fs_count}') + log.debug(f'db_count: {db_count}') + return snap_stats def 
verify_snap_stats(self, dir_path): @@ -477,6 +481,42 @@ class TestSnapSchedules(TestSnapSchedulesHelper): # remove snapshot schedule self.fs_snap_schedule_cmd('remove', path="/bad-path") + def test_snap_schedule_for_number_of_snaps_retention(self): + """ + Test that number of snaps retained are as per user spec. + """ + total_snaps = 55 + test_dir = '/' + TestSnapSchedules.TEST_DIRECTORY + + self.mount_a.run_shell(['mkdir', '-p', test_dir[1:]]) + + # set a schedule on the dir + self.fs_snap_schedule_cmd('add', path=test_dir, snap_schedule='1M') + self.fs_snap_schedule_cmd('retention', 'add', path=test_dir, + retention_spec_or_period=f'{total_snaps}n') + exec_time = time.time() + + timo_1, snap_sfx = self.calc_wait_time_and_snap_name(exec_time, '1M') + + # verify snapshot schedule + self.verify_schedule(test_dir, ['1M']) + + # we wait for total_snaps snaps to be taken + wait_time = timo_1 + total_snaps * 60 + 15 + time.sleep(wait_time) + + snap_stats = self.get_snap_stats(test_dir) + self.assertTrue(snap_stats['fs_count'] == total_snaps) + self.assertTrue(snap_stats['db_count'] >= total_snaps) + + # remove snapshot schedule + self.fs_snap_schedule_cmd('remove', path=test_dir) + + # remove all scheduled snapshots + self.remove_snapshots(test_dir[1:]) + + self.mount_a.run_shell(['rmdir', test_dir[1:]]) + class TestSnapSchedulesSnapdir(TestSnapSchedulesHelper): def remove_snapshots(self, dir_path, sdn): @@ -523,3 +563,45 @@ class TestSnapSchedulesSnapdir(TestSnapSchedulesHelper): self.remove_snapshots(TestSnapSchedulesSnapdir.TEST_DIRECTORY, sdn) self.mount_a.run_shell(['rmdir', TestSnapSchedulesSnapdir.TEST_DIRECTORY]) + + +""" +Note that the class TestSnapSchedulesMandatoryFSArgument tests snap-schedule +commands only for multi-fs scenario. Commands for a single default fs should +pass for tests defined above or elsewhere. 
+""" + + +class TestSnapSchedulesMandatoryFSArgument(TestSnapSchedulesHelper): + REQUIRE_BACKUP_FILESYSTEM = True + TEST_DIRECTORY = 'mandatory_fs_argument_test_dir' + + def test_snap_schedule_without_fs_argument(self): + """Test command fails without --fs argument in presence of multiple fs""" + test_path = TestSnapSchedulesMandatoryFSArgument.TEST_DIRECTORY + self.mount_a.run_shell(['mkdir', '-p', test_path]) + + # try setting a schedule on the dir; this should fail now that we are + # working with mutliple fs; we need the --fs argument if there are more + # than one fs hosted by the same cluster + with self.assertRaises(CommandFailedError): + self.fs_snap_schedule_cmd('add', test_path, snap_schedule='1M') + + self.mount_a.run_shell(['rmdir', test_path]) + + def test_snap_schedule_for_non_default_fs(self): + """Test command succes with --fs argument for non-default fs""" + test_path = TestSnapSchedulesMandatoryFSArgument.TEST_DIRECTORY + self.mount_a.run_shell(['mkdir', '-p', test_path]) + + # use the backup fs as the second fs; all these commands must pass + self.fs_snap_schedule_cmd('add', test_path, snap_schedule='1M', fs='backup_fs') + self.fs_snap_schedule_cmd('activate', test_path, snap_schedule='1M', fs='backup_fs') + self.fs_snap_schedule_cmd('retention', 'add', test_path, retention_spec_or_period='1M', fs='backup_fs') + self.fs_snap_schedule_cmd('list', test_path, fs='backup_fs', format='json') + self.fs_snap_schedule_cmd('status', test_path, fs='backup_fs', format='json') + self.fs_snap_schedule_cmd('retention', 'remove', test_path, retention_spec_or_period='1M', fs='backup_fs') + self.fs_snap_schedule_cmd('deactivate', test_path, snap_schedule='1M', fs='backup_fs') + self.fs_snap_schedule_cmd('remove', test_path, snap_schedule='1M', fs='backup_fs') + + self.mount_a.run_shell(['rmdir', test_path]) diff --git a/ceph/qa/tasks/cephfs/test_volumes.py b/ceph/qa/tasks/cephfs/test_volumes.py index 02db70e19..2ecfeb327 100644 --- a/ceph/qa/tasks/cephfs/test_volumes.py +++ b/ceph/qa/tasks/cephfs/test_volumes.py @@ -442,9 +442,21 @@ class TestVolumes(TestVolumesHelper): if not (volname in ([volume['name'] for volume in volumels])): raise RuntimeError("Error creating volume '{0}'".format(volname)) - else: - # clean up - self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it") + + # check that the pools were created with the correct config + pool_details = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json")) + pool_flags = {} + for pool in pool_details: + pool_flags[pool["pool_id"]] = pool["flags_names"].split(",") + + volume_details = json.loads(self._fs_cmd("get", volname, "--format=json")) + for data_pool_id in volume_details['mdsmap']['data_pools']: + self.assertIn("bulk", pool_flags[data_pool_id]) + meta_pool_id = volume_details['mdsmap']['metadata_pool'] + self.assertNotIn("bulk", pool_flags[meta_pool_id]) + + # clean up + self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it") def test_volume_ls(self): """ @@ -649,6 +661,24 @@ class TestVolumes(TestVolumesHelper): self.assertEqual(vol_info["used_size"], 0, "Size should be zero when volumes directory is empty") + def test_volume_info_pending_subvol_deletions(self): + """ + Tests the pending_subvolume_deletions in 'fs volume info' command + """ + subvolname = self._generate_random_subvolume_name() + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--mode=777") + # create 3K zero byte files + self._do_subvolume_io(subvolname, number_of_files=3000, file_size=0) + # 
Delete the subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + # get volume metadata + vol_info = json.loads(self._get_volume_info(self.volname)) + self.assertNotEqual(vol_info['pending_subvolume_deletions'], 0, + "pending_subvolume_deletions should be 1") + # verify trash dir is clean + self._wait_for_trash_empty() + def test_volume_info_without_subvolumegroup(self): """ Tests the 'fs volume info' command without subvolume group diff --git a/ceph/qa/tasks/cephfs/xfstests_dev.py b/ceph/qa/tasks/cephfs/xfstests_dev.py index bb7890001..cbb344305 100644 --- a/ceph/qa/tasks/cephfs/xfstests_dev.py +++ b/ceph/qa/tasks/cephfs/xfstests_dev.py @@ -184,9 +184,11 @@ class XFSTestsDev(CephFSTestCase): gawk gcc indent libtool lvm2 make psmisc quota sed \ xfsdump xfsprogs \ libacl-devel libattr-devel libaio-devel libuuid-devel \ - xfsprogs-devel btrfs-progs-devel python2 sqlite""".split() + xfsprogs-devel btrfs-progs-devel python3 sqlite""".split() if self.install_xfsprogs: + if distro == 'centosstream' and major_ver_num == 8: + deps += ['--enablerepo=powertools'] deps += ['inih-devel', 'userspace-rcu-devel', 'libblkid-devel', 'gettext', 'libedit-devel', 'libattr-devel', 'device-mapper-devel', 'libicu-devel'] diff --git a/ceph/qa/tasks/mgr/dashboard/test_pool.py b/ceph/qa/tasks/mgr/dashboard/test_pool.py index 10fff79ff..0699be48c 100644 --- a/ceph/qa/tasks/mgr/dashboard/test_pool.py +++ b/ceph/qa/tasks/mgr/dashboard/test_pool.py @@ -285,6 +285,7 @@ class PoolTest(DashboardTestCase): 'pool': 'dashboard_pool_quota1', 'pg_num': '32', 'pool_type': 'replicated', + 'application_metadata': ['rbd'], }, 'pool_quotas_to_check': { 'quota_max_objects': 0, @@ -296,6 +297,7 @@ class PoolTest(DashboardTestCase): 'pool': 'dashboard_pool_quota2', 'pg_num': '32', 'pool_type': 'replicated', + 'application_metadata': ['rbd'], 'quota_max_objects': 1024, 'quota_max_bytes': 1000, }, diff --git a/ceph/qa/tasks/mgr/mgr_test_case.py b/ceph/qa/tasks/mgr/mgr_test_case.py index 5fe5e0759..94a230c8d 100644 --- a/ceph/qa/tasks/mgr/mgr_test_case.py +++ b/ceph/qa/tasks/mgr/mgr_test_case.py @@ -39,6 +39,14 @@ class MgrCluster(CephCluster): return json.loads( self.mon_manager.raw_cluster_cmd("mgr", "dump", "--format=json-pretty")) + def get_registered_clients(self, name, mgr_map = None): + if mgr_map is None: + mgr_map = self.get_mgr_map() + for c in mgr_map['active_clients']: + if c['name'] == name: + return c['addrvec'] + return None + def get_active_id(self): return self.get_mgr_map()["active_name"] diff --git a/ceph/qa/tasks/mgr/test_failover.py b/ceph/qa/tasks/mgr/test_failover.py index a4e840883..bfff11262 100644 --- a/ceph/qa/tasks/mgr/test_failover.py +++ b/ceph/qa/tasks/mgr/test_failover.py @@ -146,3 +146,37 @@ class TestFailover(MgrTestCase): timeout=60 ) self.assertEqual(self.mgr_cluster.get_active_id(), original_active) + +class TestLibCephSQLiteFailover(MgrTestCase): + MGRS_REQUIRED = 1 + + def setUp(self): + super(TestLibCephSQLiteFailover, self).setUp() + self.setup_mgrs() + + def get_libcephsqlite(self): + mgr_map = self.mgr_cluster.get_mgr_map() + addresses = self.mgr_cluster.get_registered_clients('libcephsqlite', mgr_map=mgr_map) + self.assertEqual(len(addresses), 1) + return addresses[0] + + def test_maybe_reonnect(self): + """ + That the devicehealth module can recover after losing its libcephsqlite lock. 
+ """ + + # make sure the database is populated and loaded by the module + self.mgr_cluster.mon_manager.ceph("device scrape-health-metrics") + + oldaddr = self.get_libcephsqlite() + self.mgr_cluster.mon_manager.ceph(f"osd blocklist add {oldaddr['addr']}/{oldaddr['nonce']}") + + def test(): + self.mgr_cluster.mon_manager.ceph("device scrape-health-metrics") + newaddr = self.get_libcephsqlite() + return oldaddr != newaddr + + self.wait_until_true( + test, + timeout=30 + ) diff --git a/ceph/qa/valgrind.supp b/ceph/qa/valgrind.supp index 8caedfd8b..6a5a08f14 100644 --- a/ceph/qa/valgrind.supp +++ b/ceph/qa/valgrind.supp @@ -689,3 +689,15 @@ fun:EVP_DecryptFinal_ex ... } +{ + tracker #62141 : valgrind: UninitCondition under __run_exit_handlers + Memcheck:Cond + fun:free + fun:free_res + fun:__libc_freeres + fun:_vgnU_freeres + fun:__run_exit_handlers + fun:exit + fun:(below main) +} + diff --git a/ceph/qa/workunits/cephadm/test_cephadm.sh b/ceph/qa/workunits/cephadm/test_cephadm.sh index cca9cbc7b..7d06a3326 100755 --- a/ceph/qa/workunits/cephadm/test_cephadm.sh +++ b/ceph/qa/workunits/cephadm/test_cephadm.sh @@ -29,9 +29,16 @@ CEPHADM_SAMPLES_DIR=${CEPHADM_SRC_DIR}/samples [ -z "$SUDO" ] && SUDO=sudo +# If cephadm is already installed on the system, use that one, avoid building +# # one if we can. +if [ -z "$CEPHADM" ] && command -v cephadm >/dev/null ; then + CEPHADM="$(command -v cephadm)" +fi + if [ -z "$CEPHADM" ]; then CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX` ${CEPHADM_SRC_DIR}/build.sh "$CEPHADM" + NO_BUILD_INFO=1 fi # at this point, we need $CEPHADM set @@ -162,17 +169,20 @@ $SUDO $CEPHADM check-host ## run a gather-facts (output to stdout) $SUDO $CEPHADM gather-facts -## version + --image -$SUDO CEPHADM_IMAGE=$IMAGE_PACIFIC $CEPHADM_BIN version -$SUDO CEPHADM_IMAGE=$IMAGE_PACIFIC $CEPHADM_BIN version \ - | grep 'ceph version 16' -#$SUDO CEPHADM_IMAGE=$IMAGE_OCTOPUS $CEPHADM_BIN version -#$SUDO CEPHADM_IMAGE=$IMAGE_OCTOPUS $CEPHADM_BIN version \ -# | grep 'ceph version 15' -$SUDO $CEPHADM_BIN --image $IMAGE_MAIN version | grep 'ceph version' +## NOTE: cephadm version is, as of around May 2023, no longer basing the +## output for `cephadm version` on the version of the containers. The version +## reported is that of the "binary" and is determined during the ceph build. +## `cephadm version` should NOT require sudo/root. +$CEPHADM_BIN version +$CEPHADM_BIN version | grep 'cephadm version' +# Typically cmake should be running the cephadm build script with CLI arguments +# that embed version info into the "binary". If not using a cephadm build via +# cmake you can set `NO_BUILD_INFO` to skip this check. +if [ -z "$NO_BUILD_INFO" ]; then + $CEPHADM_BIN version | grep -v 'UNSET' + $CEPHADM_BIN version | grep -v 'UNKNOWN' +fi -# try force docker; this won't work if docker isn't installed -systemctl status docker > /dev/null && ( $CEPHADM --docker version | grep 'ceph version' ) || echo "docker not installed" ## test shell before bootstrap, when crash dir isn't (yet) present on this host $CEPHADM shell --fsid $FSID -- ceph -v | grep 'ceph version' @@ -250,10 +260,13 @@ $CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").version' | grep -q \\. 
# add mon.b cp $CONFIG $MONCONFIG echo "public addrv = [v2:$IP:3301,v1:$IP:6790]" >> $MONCONFIG -$CEPHADM deploy --name mon.b \ - --fsid $FSID \ - --keyring /var/lib/ceph/$FSID/mon.a/keyring \ - --config $MONCONFIG +jq --null-input \ + --arg fsid $FSID \ + --arg name mon.b \ + --arg keyring /var/lib/ceph/$FSID/mon.a/keyring \ + --arg config "$MONCONFIG" \ + '{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \ + $CEPHADM _orch deploy for u in ceph-$FSID@mon.b; do systemctl is-enabled $u systemctl is-active $u @@ -268,10 +281,13 @@ $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \ mon 'allow profile mgr' \ osd 'allow *' \ mds 'allow *' > $TMPDIR/keyring.mgr.y -$CEPHADM deploy --name mgr.y \ - --fsid $FSID \ - --keyring $TMPDIR/keyring.mgr.y \ - --config $CONFIG +jq --null-input \ + --arg fsid $FSID \ + --arg name mgr.y \ + --arg keyring $TMPDIR/keyring.mgr.y \ + --arg config "$CONFIG" \ + '{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \ + $CEPHADM _orch deploy for u in ceph-$FSID@mgr.y; do systemctl is-enabled $u systemctl is-active $u @@ -321,30 +337,42 @@ for id in `seq 0 $((--OSD_TO_CREATE))`; do osd_fsid=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_fsid"? | select(.)') # deploy the osd - $CEPHADM deploy --name osd.$osd_id \ - --fsid $FSID \ - --keyring $TMPDIR/keyring.bootstrap.osd \ - --config $CONFIG \ - --osd-fsid $osd_fsid + jq --null-input \ + --arg fsid $FSID \ + --arg name osd.$osd_id \ + --arg keyring $TMPDIR/keyring.bootstrap.osd \ + --arg config "$CONFIG" \ + --arg osd_fsid $osd_fsid \ + '{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config, "osd_fsid": $osd_fsid}}' | \ + $CEPHADM _orch deploy done # add node-exporter -${CEPHADM//--image $IMAGE_DEFAULT/} deploy \ - --name node-exporter.a --fsid $FSID +jq --null-input \ + --arg fsid $FSID \ + --arg name node-exporter.a \ + '{"fsid": $fsid, "name": $name}' | \ + ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy cond="curl 'http://localhost:9100' | grep -q 'Node Exporter'" is_available "node-exporter" "$cond" 10 # add prometheus -cat ${CEPHADM_SAMPLES_DIR}/prometheus.json | \ - ${CEPHADM//--image $IMAGE_DEFAULT/} deploy \ - --name prometheus.a --fsid $FSID --config-json - +jq --null-input \ + --arg fsid $FSID \ + --arg name prometheus.a \ + --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/prometheus.json)" \ + '{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \ + ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy cond="curl 'localhost:9095/api/v1/query?query=up'" is_available "prometheus" "$cond" 10 # add grafana -cat ${CEPHADM_SAMPLES_DIR}/grafana.json | \ - ${CEPHADM//--image $IMAGE_DEFAULT/} deploy \ - --name grafana.a --fsid $FSID --config-json - +jq --null-input \ + --arg fsid $FSID \ + --arg name grafana.a \ + --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/grafana.json)" \ + '{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \ + ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy cond="curl --insecure 'https://localhost:3000' | grep -q 'grafana'" is_available "grafana" "$cond" 50 @@ -357,11 +385,14 @@ $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \ rados --pool nfs-ganesha --namespace nfs-ns create conf-nfs.a $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \ ceph orch pause -$CEPHADM deploy --name nfs.a \ - --fsid $FSID \ - --keyring $KEYRING \ - --config $CONFIG \ - --config-json ${CEPHADM_SAMPLES_DIR}/nfs.json +jq 
--null-input \ + --arg fsid $FSID \ + --arg name nfs.a \ + --arg keyring "$KEYRING" \ + --arg config "$CONFIG" \ + --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json)" \ + '{"fsid": $fsid, "name": $name, "params": {"keyring": $keyring, "config": $config}, "config_blobs": $config_blobs}' | \ + ${CEPHADM} _orch deploy cond="$SUDO ss -tlnp '( sport = :nfs )' | grep 'ganesha.nfsd'" is_available "nfs" "$cond" 10 $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \ @@ -369,15 +400,17 @@ $CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \ # add alertmanager via custom container alertmanager_image=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.image') -tcp_ports=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.ports | map_values(.|tostring) | join(" ")') -cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | \ - ${CEPHADM//--image $IMAGE_DEFAULT/} \ - --image $alertmanager_image \ - deploy \ - --tcp-ports "$tcp_ports" \ - --name container.alertmanager.a \ - --fsid $FSID \ - --config-json - +tcp_ports=$(jq .ports ${CEPHADM_SAMPLES_DIR}/custom_container.json) +jq --null-input \ + --arg fsid $FSID \ + --arg name container.alertmanager.a \ + --arg keyring $TMPDIR/keyring.bootstrap.osd \ + --arg config "$CONFIG" \ + --arg image "$alertmanager_image" \ + --argjson tcp_ports "${tcp_ports}" \ + --argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json)" \ + '{"fsid": $fsid, "name": $name, "image": $image, "params": {"keyring": $keyring, "config": $config, "tcp_ports": $tcp_ports}, "config_blobs": $config_blobs}' | \ + ${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy cond="$CEPHADM enter --fsid $FSID --name container.alertmanager.a -- test -f \ /etc/alertmanager/alertmanager.yml" is_available "alertmanager.yml" "$cond" 10 diff --git a/ceph/qa/workunits/cephtool/test.sh b/ceph/qa/workunits/cephtool/test.sh index ace8bfe6d..aecfd56a9 100755 --- a/ceph/qa/workunits/cephtool/test.sh +++ b/ceph/qa/workunits/cephtool/test.sh @@ -1508,7 +1508,7 @@ function test_mon_osd() done for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \ - norebalance norecover notieragent + norebalance norecover notieragent noautoscale do ceph osd set $f ceph osd unset $f diff --git a/ceph/qa/workunits/mon/pg_autoscaler.sh b/ceph/qa/workunits/mon/pg_autoscaler.sh index bc5003f4c..4cf71a31c 100755 --- a/ceph/qa/workunits/mon/pg_autoscaler.sh +++ b/ceph/qa/workunits/mon/pg_autoscaler.sh @@ -137,10 +137,20 @@ ceph osd pool set bulk0 target_size_bytes 1000 ceph osd pool set meta0 target_size_ratio 1 wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO" +# test autoscale warn + +ceph osd pool create warn0 1 --autoscale-mode=warn +wait_for 120 "ceph health detail | grep POOL_TOO_FEW_PGS" + +ceph osd pool create warn1 256 --autoscale-mode=warn +wait_for 120 "ceph health detail | grep POOL_TOO_MANY_PGS" + ceph osd pool rm meta0 meta0 --yes-i-really-really-mean-it ceph osd pool rm bulk0 bulk0 --yes-i-really-really-mean-it ceph osd pool rm bulk1 bulk1 --yes-i-really-really-mean-it ceph osd pool rm bulk2 bulk2 --yes-i-really-really-mean-it +ceph osd pool rm warn0 warn0 --yes-i-really-really-mean-it +ceph osd pool rm warn1 warn1 --yes-i-really-really-mean-it echo OK diff --git a/ceph/qa/workunits/mon/test_noautoscale_flag.sh b/ceph/qa/workunits/mon/test_noautoscale_flag.sh index ca86cdf22..e1a45a4d8 100755 --- a/ceph/qa/workunits/mon/test_noautoscale_flag.sh +++ b/ceph/qa/workunits/mon/test_noautoscale_flag.sh @@ -23,7 +23,7 @@ 
sleep 2 RESULT1=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l) -# number of Pools with AUTOSCALE `off` should equal to 2 +# number of Pools with AUTOSCALE `off` should equal to $NUM_POOLS test "$RESULT1" -eq "$NUM_POOLS" @@ -49,7 +49,7 @@ RESULT2=$(ceph osd pool autoscale-status | grep -oe 'on' | wc -l) # number of Pools with AUTOSCALE `on` should equal to 3 -test "$RESULT2" -eq "$NUM_POOLS" +test "$RESULT2" -eq "$[NUM_POOLS-1]" ceph osd pool set noautoscale @@ -74,6 +74,27 @@ RESULT3=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l) test "$RESULT3" -eq "$NUM_POOLS" +# Now we test if we retain individual pool state of autoscale mode +# when we set and unset the noautoscale flag. + +ceph osd pool unset noautoscale + +ceph osd pool set pool_a pg_autoscale_mode on + +ceph osd pool set pool_b pg_autoscale_mode warn + +ceph osd pool set noautoscale + +ceph osd pool unset noautoscale + +RESULT4=$(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off\|warn') +RESULT5=$(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off\|warn') +RESULT6=$(ceph osd pool autoscale-status | grep pool_c | grep -o -m 1 'on\|off\|warn') + +test "$RESULT4" == 'on' +test "$RESULT5" == 'warn' +test "$RESULT6" == 'off' + ceph osd pool rm pool_a pool_a --yes-i-really-really-mean-it ceph osd pool rm pool_b pool_b --yes-i-really-really-mean-it diff --git a/ceph/qa/workunits/rbd/krbd_watch_errors.sh b/ceph/qa/workunits/rbd/krbd_watch_errors.sh new file mode 100755 index 000000000..f650d2a74 --- /dev/null +++ b/ceph/qa/workunits/rbd/krbd_watch_errors.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -ex +set -o pipefail + +function refresh_loop() { + local dev_id="$1" + + set +x + + local i + for ((i = 1; ; i++)); do + echo 1 | sudo tee "${SYSFS_DIR}/${dev_id}/refresh" > /dev/null + if ((i % 100 == 0)); then + echo "Refreshed ${i} times" + fi + done +} + +readonly SYSFS_DIR="/sys/bus/rbd/devices" +readonly IMAGE_NAME="watch-errors-test" + +rbd create -s 1G --image-feature exclusive-lock "${IMAGE_NAME}" + +# induce a watch error every 30 seconds +dev="$(sudo rbd device map -o osdkeepalive=60 "${IMAGE_NAME}")" +dev_id="${dev#/dev/rbd}" + +# constantly refresh, not just on watch errors +refresh_loop "${dev_id}" & +refresh_pid=$! 
+ +sudo dmesg -C + +# test that none of the above triggers a deadlock with a workload +fio --name test --filename="${dev}" --ioengine=libaio --direct=1 \ + --rw=randwrite --norandommap --randrepeat=0 --bs=512 --iodepth=128 \ + --time_based --runtime=1h --eta=never + +num_errors="$(dmesg | grep -c "rbd${dev_id}: encountered watch error")" +echo "Recorded ${num_errors} watch errors" + +kill "${refresh_pid}" +wait + +sudo rbd device unmap "${dev}" + +if ((num_errors < 60)); then + echo "Too few watch errors" + exit 1 +fi + +echo OK diff --git a/ceph/qa/workunits/rbd/rbd-nbd.sh b/ceph/qa/workunits/rbd/rbd-nbd.sh index 122df3d6f..bc89e9be5 100755 --- a/ceph/qa/workunits/rbd/rbd-nbd.sh +++ b/ceph/qa/workunits/rbd/rbd-nbd.sh @@ -472,6 +472,16 @@ DEV= rbd feature disable ${POOL}/${IMAGE} journaling rbd config image rm ${POOL}/${IMAGE} rbd_discard_granularity_bytes +# test that disabling a feature so that the op is proxied to rbd-nbd +# (arranged here by blkdiscard before "rbd feature disable") doesn't hang +DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}` +get_pid ${POOL} +rbd feature enable ${POOL}/${IMAGE} journaling +_sudo blkdiscard --offset 0 --length 4096 ${DEV} +rbd feature disable ${POOL}/${IMAGE} journaling +unmap_device ${DEV} ${PID} +DEV= + # test that rbd_op_threads setting takes effect EXPECTED=`ceph-conf --show-config-value librados_thread_count` DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}` diff --git a/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh b/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh index ca715d854..f4961b925 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_helpers.sh @@ -1169,6 +1169,16 @@ wait_for_snap_removed_from_trash() return 1 } +count_mirror_snaps() +{ + local cluster=$1 + local pool=$2 + local image=$3 + + rbd --cluster ${cluster} snap ls ${pool}/${image} --all | + grep -c -F " mirror (" +} + write_image() { local cluster=$1 diff --git a/ceph/qa/workunits/rbd/rbd_mirror_journal.sh b/ceph/qa/workunits/rbd/rbd_mirror_journal.sh index 56a8b13a9..54f6aeec8 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_journal.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_journal.sh @@ -214,7 +214,29 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped' compare_images ${POOL} ${image} -# force promote +testlog "TEST: failover / failback loop" +for i in `seq 1 20`; do + demote_image ${CLUSTER2} ${POOL} ${image} + wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown' + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown' + promote_image ${CLUSTER1} ${POOL} ${image} + wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image} + wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped' + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' + demote_image ${CLUSTER1} ${POOL} ${image} + wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown' + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown' + promote_image ${CLUSTER2} ${POOL} ${image} + wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} + wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped' + 
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' +done + +testlog "TEST: force promote" force_promote_image=test_force_promote create_image ${CLUSTER2} ${POOL} ${force_promote_image} write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100 diff --git a/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh b/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh index 0060440fb..c70d48b09 100755 --- a/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh +++ b/ceph/qa/workunits/rbd/rbd_mirror_snapshot.sh @@ -220,7 +220,32 @@ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped' compare_images ${POOL} ${image} -# force promote +testlog "TEST: failover / failback loop" +for i in `seq 1 20`; do + demote_image ${CLUSTER2} ${POOL} ${image} + wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown' + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown' + promote_image ${CLUSTER1} ${POOL} ${image} + wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image} + wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped' + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' + demote_image ${CLUSTER1} ${POOL} ${image} + wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown' + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown' + promote_image ${CLUSTER2} ${POOL} ${image} + wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image} + wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image} + wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped' + wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' +done +# check that demote (or other mirror snapshots) don't pile up +test "$(count_mirror_snaps ${CLUSTER1} ${POOL} ${image})" -le 3 +test "$(count_mirror_snaps ${CLUSTER2} ${POOL} ${image})" -le 3 + +testlog "TEST: force promote" force_promote_image=test_force_promote create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${force_promote_image} write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100 diff --git a/ceph/qa/workunits/rbd/rbd_support_module_recovery.sh b/ceph/qa/workunits/rbd/rbd_support_module_recovery.sh new file mode 100755 index 000000000..e9defced2 --- /dev/null +++ b/ceph/qa/workunits/rbd/rbd_support_module_recovery.sh @@ -0,0 +1,77 @@ +#!/bin/bash +set -ex + +POOL=rbd +IMAGE_PREFIX=image +NUM_IMAGES=20 +RUN_TIME=3600 + +rbd mirror pool enable ${POOL} image +rbd mirror pool peer add ${POOL} dummy + +# Create images and schedule their mirror snapshots +for ((i = 1; i <= ${NUM_IMAGES}; i++)); do + rbd create -s 1G --image-feature exclusive-lock ${POOL}/${IMAGE_PREFIX}$i + rbd mirror image enable ${POOL}/${IMAGE_PREFIX}$i snapshot + rbd mirror snapshot schedule add -p ${POOL} --image ${IMAGE_PREFIX}$i 1m +done + +# Run fio workloads on images via kclient +# Test the recovery of the rbd_support module and its scheduler from their +# librbd client being blocklisted while a exclusive lock gets passed around +# between their librbd client and a kclient trying to take mirror snapshots +# and perform I/O on the same image. 
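Both this workunit and the TestLibCephSQLiteFailover test above locate the module's RADOS client in the "ceph mgr dump" output (active_clients -> name/addrvec) and blocklist it as addr/nonce. A minimal sketch of that lookup, assuming only the JSON layout these tests already rely on; the client name is whatever the module registers ("rbd_support" here, "libcephsqlite" in the devicehealth case):

    import json
    import subprocess

    def blocklist_entry(client_name):
        # Find a registered mgr client by name and return the "addr/nonce"
        # string that "ceph osd blocklist add" expects, or None if absent.
        mgr_map = json.loads(subprocess.check_output(["ceph", "mgr", "dump"]))
        for client in mgr_map.get("active_clients", []):
            if client["name"] == client_name:
                av = client["addrvec"][0]
                return "{}/{}".format(av["addr"], av["nonce"])
        return None

    entry = blocklist_entry("rbd_support")
    if entry:
        subprocess.check_call(["ceph", "osd", "blocklist", "add", entry])
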
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do + DEVS[$i]=$(sudo rbd device map ${POOL}/${IMAGE_PREFIX}$i) + fio --name=fiotest --filename=${DEVS[$i]} --rw=randrw --bs=4K --direct=1 \ + --ioengine=libaio --iodepth=2 --runtime=43200 --time_based \ + &> /dev/null & +done + +# Repeatedly blocklist rbd_support module's client ~10s after the module +# recovers from previous blocklisting +CURRENT_TIME=$(date +%s) +END_TIME=$((CURRENT_TIME + RUN_TIME)) +PREV_CLIENT_ADDR="" +CLIENT_ADDR="" +while ((CURRENT_TIME <= END_TIME)); do + if [[ -n "${CLIENT_ADDR}" ]] && + [[ "${CLIENT_ADDR}" != "${PREV_CLIENT_ADDR}" ]]; then + ceph osd blocklist add ${CLIENT_ADDR} + # Confirm rbd_support module's client is blocklisted + ceph osd blocklist ls | grep -q ${CLIENT_ADDR} + PREV_CLIENT_ADDR=${CLIENT_ADDR} + fi + sleep 10 + CLIENT_ADDR=$(ceph mgr dump | + jq .active_clients[] | + jq 'select(.name == "rbd_support")' | + jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add') + CURRENT_TIME=$(date +%s) +done + +# Confirm that rbd_support module recovered from repeated blocklisting +# Check that you can add a mirror snapshot schedule after a few retries +for ((i = 1; i <= 24; i++)); do + rbd mirror snapshot schedule add -p ${POOL} \ + --image ${IMAGE_PREFIX}1 2m && break + sleep 10 +done +rbd mirror snapshot schedule ls -p ${POOL} --image ${IMAGE_PREFIX}1 | + grep 'every 2m' +# Verify that the schedule present before client blocklisting is preserved +rbd mirror snapshot schedule ls -p ${POOL} --image ${IMAGE_PREFIX}1 | + grep 'every 1m' +rbd mirror snapshot schedule rm -p ${POOL} --image ${IMAGE_PREFIX}1 2m +for ((i = 1; i <= ${NUM_IMAGES}; i++)); do + rbd mirror snapshot schedule rm -p ${POOL} --image ${IMAGE_PREFIX}$i 1m +done + +# cleanup +killall fio || true +wait +for ((i = 1; i <= ${NUM_IMAGES}; i++)); do + sudo rbd device unmap ${DEVS[$i]} +done + +echo OK diff --git a/ceph/qa/workunits/rgw/common.py b/ceph/qa/workunits/rgw/common.py index 235c36c95..2c9c5d035 100755 --- a/ceph/qa/workunits/rgw/common.py +++ b/ceph/qa/workunits/rgw/common.py @@ -5,6 +5,9 @@ import subprocess import logging as log import boto3 import botocore.exceptions +import random +import json +from time import sleep log.basicConfig(format = '%(message)s', level=log.DEBUG) log.getLogger('botocore').setLevel(log.CRITICAL) @@ -55,3 +58,46 @@ def boto_connect(access_key, secret_key, config=None): except botocore.exceptions.ConnectionError: # retry with ssl return try_connect('443', True, 'https') + +def put_objects(bucket, key_list): + objs = [] + for key in key_list: + o = bucket.put_object(Key=key, Body=b"some_data") + objs.append((o.key, o.version_id)) + return objs + +def create_unlinked_objects(conn, bucket, key_list): + # creates an unlinked/unlistable object for each key in key_list + + object_versions = [] + try: + exec_cmd('ceph config set client rgw_debug_inject_set_olh_err 2') + exec_cmd('ceph config set client rgw_debug_inject_olh_cancel_modification_err true') + sleep(1) + for key in key_list: + tag = str(random.randint(0, 1_000_000)) + try: + bucket.put_object(Key=key, Body=b"some_data", Metadata = { + 'tag': tag, + }) + except Exception as e: + log.debug(e) + out = exec_cmd(f'radosgw-admin bi list --bucket {bucket.name} --object {key}') + instance_entries = filter( + lambda x: x['type'] == 'instance', + json.loads(out.replace(b'\x80', b'0x80'))) + found = False + for ie in instance_entries: + instance_id = ie['entry']['instance'] + ov = conn.ObjectVersion(bucket.name, key, instance_id).head() + if ov['Metadata'] and 
ov['Metadata']['tag'] == tag: + object_versions.append((key, instance_id)) + found = True + break + if not found: + raise Exception(f'failed to create unlinked object for key={key}') + finally: + exec_cmd('ceph config rm client rgw_debug_inject_set_olh_err') + exec_cmd('ceph config rm client rgw_debug_inject_olh_cancel_modification_err') + return object_versions + diff --git a/ceph/qa/workunits/rgw/run-bucket-check.sh b/ceph/qa/workunits/rgw/run-bucket-check.sh new file mode 100755 index 000000000..85e02db5e --- /dev/null +++ b/ceph/qa/workunits/rgw/run-bucket-check.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -ex + +# assume working ceph environment (radosgw-admin in path) and rgw on localhost:80 +# localhost::443 for ssl + +mydir=`dirname $0` + +python3 -m venv $mydir +source $mydir/bin/activate +pip install pip --upgrade +pip install boto3 + +## run test +$mydir/bin/python3 $mydir/test_rgw_bucket_check.py + +deactivate +echo OK. + diff --git a/ceph/qa/workunits/rgw/test_rgw_bucket_check.py b/ceph/qa/workunits/rgw/test_rgw_bucket_check.py new file mode 100755 index 000000000..bfa6d65d6 --- /dev/null +++ b/ceph/qa/workunits/rgw/test_rgw_bucket_check.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 + +import logging as log +import json +import botocore +from common import exec_cmd, create_user, boto_connect, put_objects, create_unlinked_objects +from botocore.config import Config + +""" +Tests behavior of radosgw-admin bucket check commands. +""" +# The test cases in this file have been annotated for inventory. +# To extract the inventory (in csv format) use the command: +# +# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' +# +# + +""" Constants """ +USER = 'check-tester' +DISPLAY_NAME = 'Check Testing' +ACCESS_KEY = 'OJODXSLNX4LUNHQG99PA' +SECRET_KEY = '3l6ffld34qaymfomuh832j94738aie2x4p2o8h6n' +BUCKET_NAME = 'check-bucket' + +def main(): + """ + execute bucket check commands + """ + create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY) + + connection = boto_connect(ACCESS_KEY, SECRET_KEY, Config(retries = { + 'total_max_attempts': 1, + })) + + # pre-test cleanup + try: + bucket = connection.Bucket(BUCKET_NAME) + bucket.objects.all().delete() + bucket.object_versions.all().delete() + bucket.delete() + except botocore.exceptions.ClientError as e: + if not e.response['Error']['Code'] == 'NoSuchBucket': + raise + + bucket = connection.create_bucket(Bucket=BUCKET_NAME) + + null_version_keys = ['a', 'z'] + null_version_objs = put_objects(bucket, null_version_keys) + + connection.BucketVersioning(BUCKET_NAME).enable() + + ok_keys = ['a', 'b', 'c', 'd'] + unlinked_keys = ['c', 'd', 'e', 'f'] + ok_objs = put_objects(bucket, ok_keys) + + # TESTCASE 'recalculated bucket check stats are correct' + log.debug('TEST: recalculated bucket check stats are correct\n') + exec_cmd(f'radosgw-admin bucket check --fix --bucket {BUCKET_NAME}') + out = exec_cmd(f'radosgw-admin bucket stats --bucket {BUCKET_NAME}') + json_out = json.loads(out) + log.debug(json_out['usage']) + assert json_out['usage']['rgw.main']['num_objects'] == 6 + + # TESTCASE 'bucket check unlinked does not report normal entries' + log.debug('TEST: bucket check unlinked does not report normal entries\n') + out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == 0 + + unlinked_objs = create_unlinked_objects(connection, bucket, unlinked_keys) + + # TESTCASE 'bucket check unlinked finds unlistable entries' + log.debug('TEST: bucket check 
unlinked finds unlistable entries\n') + out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == len(unlinked_keys) + + # TESTCASE 'unlinked entries are not listable' + log.debug('TEST: unlinked entries are not listable\n') + for ov in bucket.object_versions.all(): + assert (ov.key, ov.version_id) not in unlinked_objs, f'object "{ov.key}:{ov.version_id}" was found in bucket listing' + + # TESTCASE 'GET returns 404 for unlinked entry keys that have no other versions' + log.debug('TEST: GET returns 404 for unlinked entry keys that have no other versions\n') + noent_keys = set(unlinked_keys) - set(ok_keys) + for key in noent_keys: + try: + bucket.Object(key).get() + assert False, 'GET did not return 404 for key={key} with no prior successful PUT' + except botocore.exceptions.ClientError as e: + assert e.response['ResponseMetadata']['HTTPStatusCode'] == 404 + + # TESTCASE 'bucket check unlinked fixes unlistable entries' + log.debug('TEST: bucket check unlinked fixes unlistable entries\n') + out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == len(unlinked_keys) + for o in unlinked_objs: + try: + connection.ObjectVersion(bucket.name, o[0], o[1]).head() + assert False, f'head for unlistable object {o[0]}:{o[1]} succeeded after fix' + except botocore.exceptions.ClientError as e: + assert e.response['ResponseMetadata']['HTTPStatusCode'] == 404 + + # TESTCASE 'bucket check unlinked fix does not affect normal entries' + log.debug('TEST: bucket check unlinked does not affect normal entries\n') + all_listable = list(bucket.object_versions.all()) + assert len(all_listable) == len(ok_keys) + len(null_version_keys), 'some normal objects were not accounted for in object listing after unlinked fix' + for o in ok_objs: + assert o in map(lambda x: (x.key, x.version_id), all_listable), "normal object not listable after fix" + connection.ObjectVersion(bucket.name, o[0], o[1]).head() + + # TESTCASE 'bucket check unlinked does not find new unlistable entries after fix' + log.debug('TEST: bucket check unlinked does not find new unlistable entries after fix\n') + out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --min-age-hours 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == 0 + + # for this set of keys we can produce leftover OLH object/entries by + # deleting the normal object instance since we should already have a leftover + # pending xattr on the OLH object due to the errors associated with the + # prior unlinked entries that were created for the same keys + leftover_pending_xattr_keys = set(ok_keys).intersection(unlinked_keys) + objs_to_delete = filter(lambda x: x[0] in leftover_pending_xattr_keys, ok_objs) + + for o in objs_to_delete: + connection.ObjectVersion(bucket.name, o[0], o[1]).delete() + + for key in leftover_pending_xattr_keys: + out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME} --object {key}') + idx_entries = json.loads(out.replace(b'\x80', b'0x80')) + assert len(idx_entries) > 0, 'failed to create leftover OLH entries for key {key}' + + # TESTCASE 'bucket check olh finds leftover OLH entries' + log.debug('TEST: bucket check olh finds leftover OLH entries\n') + out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --dump-keys') + json_out = json.loads(out) + assert 
len(json_out) == len(leftover_pending_xattr_keys) + + # TESTCASE 'bucket check olh fixes leftover OLH entries' + log.debug('TEST: bucket check olh fixes leftover OLH entries\n') + out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --fix --rgw-olh-pending-timeout-sec 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == len(leftover_pending_xattr_keys) + + for key in leftover_pending_xattr_keys: + out = exec_cmd(f'radosgw-admin bi list --bucket {BUCKET_NAME} --object {key}') + idx_entries = json.loads(out.replace(b'\x80', b'0x80')) + assert len(idx_entries) == 0, 'index entries still exist for key={key} after olh fix' + + # TESTCASE 'bucket check olh does not find new leftover OLH entries after fix' + log.debug('TEST: bucket check olh does not find new leftover OLH entries after fix\n') + out = exec_cmd(f'radosgw-admin bucket check olh --bucket {BUCKET_NAME} --dump-keys') + json_out = json.loads(out) + assert len(json_out) == 0 + + # TESTCASE 'bucket check fixes do not affect null version objects' + log.debug('TEST: verify that bucket check fixes do not affect null version objects\n') + for o in null_version_objs: + connection.ObjectVersion(bucket.name, o[0], 'null').head() + + all_versions = list(map(lambda x: (x.key, x.version_id), bucket.object_versions.all())) + for key in null_version_keys: + assert (key, 'null') in all_versions + + # TESTCASE 'bucket check stats are correct in the presence of unlinked entries' + log.debug('TEST: bucket check stats are correct in the presence of unlinked entries\n') + bucket.object_versions.all().delete() + null_version_objs = put_objects(bucket, null_version_keys) + ok_objs = put_objects(bucket, ok_keys) + unlinked_objs = create_unlinked_objects(connection, bucket, unlinked_keys) + exec_cmd(f'radosgw-admin bucket check --fix --bucket {BUCKET_NAME}') + out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == len(unlinked_keys) + bucket.object_versions.all().delete() + out = exec_cmd(f'radosgw-admin bucket stats --bucket {BUCKET_NAME}') + json_out = json.loads(out) + log.debug(json_out['usage']) + assert json_out['usage']['rgw.main']['size'] == 0 + assert json_out['usage']['rgw.main']['num_objects'] == 0 + assert json_out['usage']['rgw.main']['size_actual'] == 0 + assert json_out['usage']['rgw.main']['size_kb'] == 0 + assert json_out['usage']['rgw.main']['size_kb_actual'] == 0 + assert json_out['usage']['rgw.main']['size_kb_utilized'] == 0 + + # Clean up + log.debug("Deleting bucket {}".format(BUCKET_NAME)) + bucket.object_versions.all().delete() + bucket.delete() + +main() +log.info("Completed bucket check tests") diff --git a/ceph/qa/workunits/rgw/test_rgw_reshard.py b/ceph/qa/workunits/rgw/test_rgw_reshard.py index 0b370dc72..6326e7b17 100755 --- a/ceph/qa/workunits/rgw/test_rgw_reshard.py +++ b/ceph/qa/workunits/rgw/test_rgw_reshard.py @@ -5,7 +5,7 @@ import time import logging as log import json import os -from common import exec_cmd, boto_connect, create_user +from common import exec_cmd, boto_connect, create_user, put_objects, create_unlinked_objects """ Rgw manual and dynamic resharding testing against a running instance @@ -145,7 +145,7 @@ def main(): execute manual and dynamic resharding commands """ create_user(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY) - + connection = boto_connect(ACCESS_KEY, SECRET_KEY) # create a bucket @@ -277,6 +277,28 @@ def main(): 
ver_bucket.put_object(Key='put_during_reshard', Body=b"some_data") log.debug('put object successful') + # TESTCASE 'check that bucket stats are correct after reshard with unlinked entries' + log.debug('TEST: check that bucket stats are correct after reshard with unlinked entries\n') + ver_bucket.object_versions.all().delete() + ok_keys = ['a', 'b', 'c'] + unlinked_keys = ['x', 'y', 'z'] + put_objects(ver_bucket, ok_keys) + create_unlinked_objects(connection, ver_bucket, unlinked_keys) + cmd = exec_cmd(f'radosgw-admin bucket reshard --bucket {VER_BUCKET_NAME} --num-shards 17 --yes-i-really-mean-it') + out = exec_cmd(f'radosgw-admin bucket check unlinked --bucket {VER_BUCKET_NAME} --fix --min-age-hours 0 --rgw-olh-pending-timeout-sec 0 --dump-keys') + json_out = json.loads(out) + assert len(json_out) == len(unlinked_keys) + ver_bucket.object_versions.all().delete() + out = exec_cmd(f'radosgw-admin bucket stats --bucket {VER_BUCKET_NAME}') + json_out = json.loads(out) + log.debug(json_out['usage']) + assert json_out['usage']['rgw.main']['size'] == 0 + assert json_out['usage']['rgw.main']['num_objects'] == 0 + assert json_out['usage']['rgw.main']['size_actual'] == 0 + assert json_out['usage']['rgw.main']['size_kb'] == 0 + assert json_out['usage']['rgw.main']['size_kb_actual'] == 0 + assert json_out['usage']['rgw.main']['size_kb_utilized'] == 0 + # Clean up log.debug("Deleting bucket {}".format(BUCKET_NAME)) bucket.objects.all().delete() diff --git a/ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.py b/ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.py new file mode 100755 index 000000000..b3cb2d5ab --- /dev/null +++ b/ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.py @@ -0,0 +1,121 @@ +import boto3 +import botocore.exceptions +import sys +import os +import subprocess + +#boto3.set_stream_logger(name='botocore') + +# handles two optional system arguments: +# : default is "bkt134" +# <0 or 1> : 0 -> upload aborted, 1 -> completed; default is completed + +if len(sys.argv) >= 2: + bucket_name = sys.argv[1] +else: + bucket_name = "bkt314738362229" +print("bucket nams is %s" % bucket_name) + +complete_mpu = True +if len(sys.argv) >= 3: + complete_mpu = int(sys.argv[2]) > 0 + +versioned_bucket = False +if len(sys.argv) >= 4: + versioned_bucket = int(sys.argv[3]) > 0 + +rgw_host = os.environ['RGW_HOST'] +access_key = os.environ['RGW_ACCESS_KEY'] +secret_key = os.environ['RGW_SECRET_KEY'] + +try: + endpoint='http://%s:%d' % (rgw_host, 80) + client = boto3.client('s3', + endpoint_url=endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + res = client.create_bucket(Bucket=bucket_name) +except botocore.exceptions.EndpointConnectionError: + try: + endpoint='https://%s:%d' % (rgw_host, 443) + client = boto3.client('s3', + endpoint_url=endpoint, + verify=False, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + res = client.create_bucket(Bucket=bucket_name) + except botocore.exceptions.EndpointConnectionError: + endpoint='http://%s:%d' % (rgw_host, 8000) + client = boto3.client('s3', + endpoint_url=endpoint, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key) + res = client.create_bucket(Bucket=bucket_name) + +print("endpoint is %s" % endpoint) + +if versioned_bucket: + res = client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'MFADelete': 'Disabled', + 'Status': 'Enabled'} + ) + +key = "mpu_test4" +nparts = 2 +ndups = 11 +do_reupload = True + +part_path = "/tmp/mp_part_5m" +subprocess.run(["dd", "if=/dev/urandom", "of=" + 
part_path, "bs=1M", "count=5"], check=True) + +f = open(part_path, 'rb') + +res = client.create_multipart_upload(Bucket=bucket_name, Key=key) +mpu_id = res["UploadId"] + +print("start UploadId=%s" % (mpu_id)) + +parts = [] +parts2 = [] + +for ix in range(0,nparts): + part_num = ix + 1 + f.seek(0) + res = client.upload_part(Body=f, Bucket=bucket_name, Key=key, + UploadId=mpu_id, PartNumber=part_num) + # save + etag = res['ETag'] + part = {'ETag': etag, 'PartNumber': part_num} + print("phase 1 uploaded part %s" % part) + parts.append(part) + +if do_reupload: + # just re-upload part 1 + part_num = 1 + for ix in range(0,ndups): + f.seek(0) + res = client.upload_part(Body=f, Bucket=bucket_name, Key=key, + UploadId=mpu_id, PartNumber=part_num) + etag = res['ETag'] + part = {'ETag': etag, 'PartNumber': part_num} + print ("phase 2 uploaded part %s" % part) + + # save + etag = res['ETag'] + part = {'ETag': etag, 'PartNumber': part_num} + parts2.append(part) + +if complete_mpu: + print("completing multipart upload, parts=%s" % parts) + res = client.complete_multipart_upload( + Bucket=bucket_name, Key=key, UploadId=mpu_id, + MultipartUpload={'Parts': parts}) +else: + print("aborting multipart upload, parts=%s" % parts) + res = client.abort_multipart_upload( + Bucket=bucket_name, Key=key, UploadId=mpu_id) + +# clean up +subprocess.run(["rm", "-f", part_path], check=True) diff --git a/ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh b/ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh new file mode 100755 index 000000000..5d73fd048 --- /dev/null +++ b/ceph/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh @@ -0,0 +1,110 @@ +#!/usr/bin/env bash + +# INITIALIZATION + +mydir=$(dirname $0) +data_pool=default.rgw.buckets.data +orphan_list_out=/tmp/orphan_list.$$ +radoslist_out=/tmp/radoslist.$$ +rados_ls_out=/tmp/rados_ls.$$ +diff_out=/tmp/diff.$$ + +rgw_host="$(hostname --fqdn)" +echo "INFO: fully qualified domain name: $rgw_host" + +export RGW_ACCESS_KEY="0555b35654ad1656d804" +export RGW_SECRET_KEY="h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==" +export RGW_HOST="${RGW_HOST:-$rgw_host}" + +# random argument determines if multipart is aborted or completed 50/50 +outcome=$((RANDOM % 2)) +if [ $outcome -eq 0 ] ;then + echo "== TESTING *ABORTING* MULTIPART UPLOAD WITH RE-UPLOADS ==" +else + echo "== TESTING *COMPLETING* MULTIPART UPLOAD WITH RE-UPLOADS ==" +fi + +# random argument determines if multipart is aborted or completed 50/50 +versioning=$((RANDOM % 2)) +if [ $versioning -eq 0 ] ;then + echo "== TESTING NON-VERSIONED BUCKET ==" +else + echo "== TESTING VERSIONED BUCKET ==" +fi + +# create a randomized bucket name +bucket="reupload-bkt-$((RANDOM % 899999 + 100000))" + + +# SET UP PYTHON VIRTUAL ENVIRONMENT + +# install boto3 +python3 -m venv $mydir +source $mydir/bin/activate +pip install pip --upgrade +pip install boto3 + + +# CREATE RGW USER IF NECESSARY + +if radosgw-admin user info --access-key $RGW_ACCESS_KEY 2>/dev/null ;then + echo INFO: user already exists +else + echo INFO: creating user + radosgw-admin user create --uid testid \ + --access-key $RGW_ACCESS_KEY \ + --secret $RGW_SECRET_KEY \ + --display-name 'M. 
Tester' \ + --email tester@ceph.com 2>/dev/null +fi + + +# RUN REUPLOAD TEST + +$mydir/bin/python3 ${mydir}/test_rgw_s3_mp_reupload.py $bucket $outcome $versioning + + +# ANALYZE FOR ERRORS +# (NOTE: for now we're choosing not to use the rgw-orphan-list tool) + +# force garbage collection to remove extra parts +radosgw-admin gc process --include-all 2>/dev/null + +marker=$(radosgw-admin metadata get bucket:$bucket 2>/dev/null | grep bucket_id | sed 's/.*: "\(.*\)".*/\1/') + +# determine expected rados objects +radosgw-admin bucket radoslist --bucket=$bucket 2>/dev/null | sort >$radoslist_out +echo "radosgw-admin bucket radoslist:" +cat $radoslist_out + +# determine found rados objects +rados ls -p $data_pool 2>/dev/null | grep "^$marker" | sort >$rados_ls_out +echo "rados ls:" +cat $rados_ls_out + +# compare expected and found +diff $radoslist_out $rados_ls_out >$diff_out +if [ $(cat $diff_out | wc -l) -ne 0 ] ;then + error=1 + echo "ERROR: Found differences between expected and actual rados objects for test bucket." + echo " note: indicators: '>' found but not expected; '<' expected but not found." + cat $diff_out +fi + + +# CLEAN UP + +deactivate + +rm -f $orphan_list_out $radoslist_out $rados_ls_out $diff_out + + +# PRODUCE FINAL RESULTS + +if [ -n "$error" ] ;then + echo "== FAILED ==" + exit 1 +fi + +echo "== PASSED ==" +exit 0 diff --git a/ceph/src/.git_version b/ceph/src/.git_version index 911f45990..d9fc6dd6b 100644 --- a/ceph/src/.git_version +++ b/ceph/src/.git_version @@ -1,2 +1,2 @@ -5dd24139a1eada541a3bc16b6941c5dde975e26d -18.2.0 +7fe91d5d5842e04be3b4f514d6dd990c54b29c76 +18.2.1 diff --git a/ceph/src/CMakeLists.txt b/ceph/src/CMakeLists.txt index 0a841c5db..e1f90f72b 100644 --- a/ceph/src/CMakeLists.txt +++ b/ceph/src/CMakeLists.txt @@ -321,7 +321,7 @@ if(NOT TARGET RapidJSON::RapidJSON) endif() option(WITH_FMT_HEADER_ONLY "use header-only version of fmt library" OFF) -set(WITH_FMT_VERSION "7.0.0" CACHE +set(WITH_FMT_VERSION "8.1.1" CACHE STRING "build with fmt version") find_package(fmt ${WITH_FMT_VERSION} QUIET) if(fmt_FOUND) diff --git a/ceph/src/SimpleRADOSStriper.cc b/ceph/src/SimpleRADOSStriper.cc index bbbf15527..aa47d8112 100644 --- a/ceph/src/SimpleRADOSStriper.cc +++ b/ceph/src/SimpleRADOSStriper.cc @@ -131,7 +131,7 @@ int SimpleRADOSStriper::remove() auto ext = get_first_extent(); if (int rc = ioctx.remove(ext.soid); rc < 0) { - d(5) << " remove failed: " << cpp_strerror(rc) << dendl; + d(1) << " remove failed: " << cpp_strerror(rc) << dendl; return rc; } @@ -171,7 +171,7 @@ int SimpleRADOSStriper::wait_for_aios(bool block) } } if (rc) { - d(5) << " aio failed: " << cpp_strerror(rc) << dendl; + d(1) << " aio failed: " << cpp_strerror(rc) << dendl; if (aios_failure == 0) { aios_failure = rc; } @@ -257,7 +257,7 @@ int SimpleRADOSStriper::open() op.getxattr(XATTR_ALLOCATED, &bl_alloc, &prval_alloc); op.getxattr(XATTR_VERSION, &bl_version, &prval_version); if (int rc = ioctx.operate(ext.soid, &op, &pbl); rc < 0) { - d(5) << " getxattr failed: " << cpp_strerror(rc) << dendl; + d(1) << " getxattr failed: " << cpp_strerror(rc) << dendl; return rc; } exclusive_holder = bl_excl.to_str(); @@ -297,7 +297,7 @@ int SimpleRADOSStriper::shrink_alloc(uint64_t a) auto ext = get_next_extent(offset, len); auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion()); if (int rc = ioctx.aio_remove(ext.soid, aiocp.get()); rc < 0) { - d(5) << " aio_remove failed: " << cpp_strerror(rc) << dendl; + d(1) << " aio_remove failed: " << cpp_strerror(rc) << dendl; return rc; } 
removes.emplace_back(std::move(aiocp)); @@ -307,7 +307,7 @@ int SimpleRADOSStriper::shrink_alloc(uint64_t a) for (auto& aiocp : removes) { if (int rc = aiocp->wait_for_complete(); rc < 0 && rc != -ENOENT) { - d(5) << " aio_remove failed: " << cpp_strerror(rc) << dendl; + d(1) << " aio_remove failed: " << cpp_strerror(rc) << dendl; return rc; } } @@ -320,7 +320,7 @@ int SimpleRADOSStriper::shrink_alloc(uint64_t a) op.setxattr(XATTR_VERSION, uint2bl(version+1)); d(15) << " updating version to " << (version+1) << dendl; if (int rc = ioctx.aio_operate(ext.soid, aiocp.get(), &op); rc < 0) { - d(5) << " update failed: " << cpp_strerror(rc) << dendl; + d(1) << " update failed: " << cpp_strerror(rc) << dendl; return rc; } /* we need to wait so we don't have dangling extents */ @@ -726,7 +726,7 @@ int SimpleRADOSStriper::lock(uint64_t timeoutms) } if (int rc = open(); rc < 0) { - d(5) << " open failed: " << cpp_strerror(rc) << dendl; + d(1) << " open failed: " << cpp_strerror(rc) << dendl; return rc; } diff --git a/ceph/src/ceph-volume/ceph_volume/api/lvm.py b/ceph/src/ceph-volume/ceph_volume/api/lvm.py index 16cbc08b2..dcc4f1862 100644 --- a/ceph/src/ceph-volume/ceph_volume/api/lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/api/lvm.py @@ -6,6 +6,7 @@ set of utilities for interacting with LVM. import logging import os import uuid +import re from itertools import repeat from math import floor from ceph_volume import process, util, conf @@ -1209,3 +1210,39 @@ def get_lv_by_fullname(full_name): except ValueError: res_lv = None return res_lv + +def get_lv_path_from_mapper(mapper): + """ + This functions translates a given mapper device under the format: + /dev/mapper/LV to the format /dev/VG/LV. + eg: + from: + /dev/mapper/ceph--c1a97e46--234c--46aa--a549--3ca1d1f356a9-osd--block--32e8e896--172e--4a38--a06a--3702598510ec + to: + /dev/ceph-c1a97e46-234c-46aa-a549-3ca1d1f356a9/osd-block-32e8e896-172e-4a38-a06a-3702598510ec + """ + results = re.split(r'^\/dev\/mapper\/(.+\w)-(\w.+)', mapper) + results = list(filter(None, results)) + + if len(results) != 2: + return None + + return f"/dev/{results[0].replace('--', '-')}/{results[1].replace('--', '-')}" + +def get_mapper_from_lv_path(lv_path): + """ + This functions translates a given lv path under the format: + /dev/VG/LV to the format /dev/mapper/LV. 
+ eg: + from: + /dev/ceph-c1a97e46-234c-46aa-a549-3ca1d1f356a9/osd-block-32e8e896-172e-4a38-a06a-3702598510ec + to: + /dev/mapper/ceph--c1a97e46--234c--46aa--a549--3ca1d1f356a9-osd--block--32e8e896--172e--4a38--a06a--3702598510ec + """ + results = re.split(r'^\/dev\/(.+\w)-(\w.+)', lv_path) + results = list(filter(None, results)) + + if len(results) != 2: + return None + + return f"/dev/mapper/{results[0].replace('-', '--')}/{results[1].replace('-', '--')}" diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py index 46846a1dc..0cc8d71ae 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py @@ -28,7 +28,7 @@ def deactivate_osd(osd_id=None, osd_uuid=None): for lv in lvs: if lv.tags.get('ceph.encrypted', '0') == '1': - encryption.dmcrypt_close(lv.lv_uuid) + encryption.dmcrypt_close(mapping=lv.lv_uuid, skip_path_check=True) class Deactivate(object): diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py index 86159fd50..64589a2d6 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/migrate.py @@ -6,6 +6,7 @@ from textwrap import dedent from ceph_volume.util import system, disk, merge_dict from ceph_volume.util.device import Device from ceph_volume.util.arg_validators import valid_osd_id +from ceph_volume.util import encryption as encryption_utils from ceph_volume import decorators, terminal, process from ceph_volume.api import lvm as api from ceph_volume.systemd import systemctl @@ -300,6 +301,15 @@ class Migrate(object): osd_path, self.get_filename_by_type(type))] return ret + def close_encrypted(self, source_devices): + # close source device(-s) if they're encrypted and have been removed + for device,type in source_devices: + if (type == 'db' or type == 'wal'): + logger.info("closing dmcrypt volume {}" + .format(device.lv_api.lv_uuid)) + encryption_utils.dmcrypt_close( + mapping = device.lv_api.lv_uuid, skip_path_check=True) + @decorators.needs_root def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv): source_devices = self.get_source_devices(devices) @@ -312,9 +322,14 @@ class Migrate(object): "Unable to migrate to : {}".format(self.args.target)) target_path = target_lv.lv_path - + tag_tracker = VolumeTagTracker(devices, target_lv) + # prepare and encrypt target if data volume is encrypted + if tag_tracker.data_device.lv_api.encrypted: + secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid) + mlogger.info(' preparing dmcrypt for {}, uuid {}'.format(target_lv.lv_path, target_lv.lv_uuid)) + target_path = encryption_utils.prepare_dmcrypt( + key=secret, device=target_path, mapping=target_lv.lv_uuid) try: - tag_tracker = VolumeTagTracker(devices, target_lv) # we need to update lvm tags for all the remaining volumes # and clear for ones which to be removed @@ -340,10 +355,13 @@ class Migrate(object): 'Failed to migrate device, error code:{}'.format(exit_code)) raise SystemExit( 'Failed to migrate to : {}'.format(self.args.target)) - else: - system.chown(os.path.join(osd_path, "block.{}".format( - target_type))) - terminal.success('Migration successful.') + + system.chown(os.path.join(osd_path, "block.{}".format( + target_type))) + if tag_tracker.data_device.lv_api.encrypted: + self.close_encrypted(source_devices) + terminal.success('Migration successful.') + except: tag_tracker.undo() 
raise @@ -391,8 +409,9 @@ class Migrate(object): 'Failed to migrate device, error code:{}'.format(exit_code)) raise SystemExit( 'Failed to migrate to : {}'.format(self.args.target)) - else: - terminal.success('Migration successful.') + if tag_tracker.data_device.lv_api.encrypted: + self.close_encrypted(source_devices) + terminal.success('Migration successful.') except: tag_tracker.undo() raise @@ -574,7 +593,14 @@ class NewVolume(object): mlogger.info( 'Making new volume at {} for OSD: {} ({})'.format( target_lv.lv_path, osd_id, osd_path)) + target_path = target_lv.lv_path tag_tracker = VolumeTagTracker(devices, target_lv) + # prepare and encrypt target if data volume is encrypted + if tag_tracker.data_device.lv_api.encrypted: + secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid) + mlogger.info(' preparing dmcrypt for {}, uuid {}'.format(target_lv.lv_path, target_lv.lv_uuid)) + target_path = encryption_utils.prepare_dmcrypt( + key=secret, device=target_path, mapping=target_lv.lv_uuid) try: tag_tracker.update_tags_when_lv_create(self.create_type) @@ -584,7 +610,7 @@ class NewVolume(object): '--path', osd_path, '--dev-target', - target_lv.lv_path, + target_path, '--command', 'bluefs-bdev-new-{}'.format(self.create_type) ]) diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py index 1cf19d98d..85c8a1467 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/prepare.py @@ -23,19 +23,7 @@ def prepare_dmcrypt(key, device, device_type, tags): return '' tag_name = 'ceph.%s_uuid' % device_type uuid = tags[tag_name] - # format data device - encryption_utils.luks_format( - key, - device - ) - encryption_utils.luks_open( - key, - device, - uuid - ) - - return '/dev/mapper/%s' % uuid - + return encryption_utils.prepare_dmcrypt(key, device, uuid) def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid): """ diff --git a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py index 2f6e00f87..d4d78ad01 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/lvm/zap.py @@ -302,9 +302,8 @@ class Zap(object): self.zap(devices) def dmcrypt_close(self, dmcrypt_uuid): - dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid) - mlogger.info("Closing encrypted path %s", dmcrypt_path) - encryption.dmcrypt_close(dmcrypt_path) + mlogger.info("Closing encrypted volume %s", dmcrypt_uuid) + encryption.dmcrypt_close(mapping=dmcrypt_uuid, skip_path_check=True) def main(self): sub_command_help = dedent(""" diff --git a/ceph/src/ceph-volume/ceph_volume/devices/raw/common.py b/ceph/src/ceph-volume/ceph_volume/devices/raw/common.py index 19de81fe5..89ee285be 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/raw/common.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/raw/common.py @@ -49,4 +49,10 @@ def create_parser(prog, description): action='store_true', help='Enable device encryption via dm-crypt', ) + parser.add_argument( + '--osd-id', + help='Reuse an existing OSD id', + default=None, + type=arg_validators.valid_osd_id, + ) return parser diff --git a/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py b/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py index a9eb41312..794bb18c1 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/raw/list.py @@ -5,7 +5,7 @@ import logging from textwrap import 
dedent from ceph_volume import decorators, process from ceph_volume.util import disk - +from typing import Any, Dict, List logger = logging.getLogger(__name__) @@ -66,46 +66,57 @@ class List(object): def __init__(self, argv): self.argv = argv + def is_atari_partitions(self, _lsblk: Dict[str, Any]) -> bool: + dev = _lsblk['NAME'] + if _lsblk.get('PKNAME'): + parent = _lsblk['PKNAME'] + try: + if disk.has_bluestore_label(parent): + logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent), + 'device is likely a phantom Atari partition. device info: {}'.format(_lsblk))) + return True + except OSError as e: + logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev), + 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e))) + return True + return False + + def exclude_atari_partitions(self, _lsblk_all: Dict[str, Any]) -> List[Dict[str, Any]]: + return [_lsblk for _lsblk in _lsblk_all if not self.is_atari_partitions(_lsblk)] + def generate(self, devs=None): logger.debug('Listing block devices via lsblk...') - info_devices = disk.lsblk_all(abspath=True) - if devs is None or devs == []: + info_devices = [] + if not devs or not any(devs): # If no devs are given initially, we want to list ALL devices including children and # parents. Parent disks with child partitions may be the appropriate device to return if # the parent disk has a bluestore header, but children may be the most appropriate # devices to return if the parent disk does not have a bluestore header. + info_devices = disk.lsblk_all(abspath=True) devs = [device['NAME'] for device in info_devices if device.get('NAME',)] + else: + for dev in devs: + info_devices.append(disk.lsblk(dev, abspath=True)) + + # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret + # bluestore's on-disk format as an Atari partition table. These false Atari partitions + # can be interpreted as real OSDs if a bluestore OSD was previously created on the false + # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a + # parent, it is a child. If the parent is a valid bluestore OSD, the child will only + # exist if it is a phantom Atari partition, and the child should be ignored. If the + # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to + # determine whether a parent is bluestore, we should err on the side of not reporting + # the child so as not to give a false negative. + info_devices = self.exclude_atari_partitions(info_devices) result = {} logger.debug('inspecting devices: {}'.format(devs)) - for dev in devs: - # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret - # bluestore's on-disk format as an Atari partition table. These false Atari partitions - # can be interpreted as real OSDs if a bluestore OSD was previously created on the false - # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a - # parent, it is a child. If the parent is a valid bluestore OSD, the child will only - # exist if it is a phantom Atari partition, and the child should be ignored. If the - # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to - # determine whether a parent is bluestore, we should err on the side of not reporting - # the child so as not to give a false negative. 
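The refactor above moves the phantom-partition handling into is_atari_partitions()/exclude_atari_partitions(), but the rule is unchanged: an lsblk child whose parent already carries a BlueStore label cannot be a real OSD device, and a child whose parent cannot be inspected is dropped as well. A standalone sketch of that filter, assuming lsblk-style dicts with NAME/PKNAME and a has_bluestore_label() helper such as the one in ceph_volume.util.disk:

    from typing import Any, Callable, Dict, List

    def exclude_phantom_atari_parts(entries: List[Dict[str, Any]],
                                    has_bluestore_label: Callable[[str], bool]
                                    ) -> List[Dict[str, Any]]:
        # Keep only devices that are not phantom Atari partitions: skip any
        # child whose parent is a BlueStore OSD, and err on the side of
        # skipping when the parent cannot be checked.
        kept = []
        for entry in entries:
            parent = entry.get('PKNAME')
            if parent:
                try:
                    if has_bluestore_label(parent):
                        continue
                except OSError:
                    continue
            kept.append(entry)
        return kept
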
- for info_device in info_devices: - if 'PKNAME' in info_device and info_device['PKNAME'] != "": - parent = info_device['PKNAME'] - try: - if disk.has_bluestore_label(parent): - logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent), - 'device is likely a phantom Atari partition. device info: {}'.format(info_device))) - continue - except OSError as e: - logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev), - 'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e))) - continue - - bs_info = _get_bluestore_info(dev) + for info_device in info_devices: + bs_info = _get_bluestore_info(info_device['NAME']) if bs_info is None: # None is also returned in the rare event that there is an issue reading info from # a BlueStore disk, so be sure to log our assumption that it isn't bluestore - logger.info('device {} does not have BlueStore information'.format(dev)) + logger.info('device {} does not have BlueStore information'.format(info_device['NAME'])) continue uuid = bs_info['osd_uuid'] if uuid not in result: diff --git a/ceph/src/ceph-volume/ceph_volume/devices/raw/prepare.py b/ceph/src/ceph-volume/ceph_volume/devices/raw/prepare.py index 6165da3a6..b3201a89d 100644 --- a/ceph/src/ceph-volume/ceph_volume/devices/raw/prepare.py +++ b/ceph/src/ceph-volume/ceph_volume/devices/raw/prepare.py @@ -22,18 +22,7 @@ def prepare_dmcrypt(key, device, device_type, fsid): return '' kname = disk.lsblk(device)['KNAME'] mapping = 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type) - # format data device - encryption_utils.luks_format( - key, - device - ) - encryption_utils.luks_open( - key, - device, - mapping - ) - - return '/dev/mapper/{}'.format(mapping) + return encryption_utils.prepare_dmcrypt(key, device, mapping) def prepare_bluestore(block, wal, db, secrets, osd_id, fsid, tmpfs): """ @@ -122,7 +111,9 @@ class Prepare(object): # reuse a given ID if it exists, otherwise create a new ID self.osd_id = prepare_utils.create_id( - osd_fsid, json.dumps(secrets)) + osd_fsid, + json.dumps(secrets), + osd_id=self.args.osd_id) prepare_bluestore( self.args.data, diff --git a/ceph/src/ceph-volume/ceph_volume/inventory/main.py b/ceph/src/ceph-volume/ceph_volume/inventory/main.py index aa70e92f1..da0ff6c88 100644 --- a/ceph/src/ceph-volume/ceph_volume/inventory/main.py +++ b/ceph/src/ceph-volume/ceph_volume/inventory/main.py @@ -45,18 +45,27 @@ class Inventory(object): 'libstoragemgmt'), default=False, ) + parser.add_argument( + '--list-all', + action='store_true', + help=('Whether ceph-volume should list lvm devices'), + default=False + ) self.args = parser.parse_args(self.argv) if self.args.path: self.format_report(Device(self.args.path, with_lsm=self.args.with_lsm)) else: self.format_report(Devices(filter_for_batch=self.args.filter_for_batch, - with_lsm=self.args.with_lsm)) + with_lsm=self.args.with_lsm, + list_all=self.args.list_all)) def get_report(self): if self.args.path: return Device(self.args.path, with_lsm=self.args.with_lsm).json_report() else: - return Devices(filter_for_batch=self.args.filter_for_batch, with_lsm=self.args.with_lsm).json_report() + return Devices(filter_for_batch=self.args.filter_for_batch, + with_lsm=self.args.with_lsm, + list_all=self.args.list_all).json_report() def format_report(self, inventory): if self.args.format == 'json': diff --git a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py 
b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py index 603b2b854..139328b4a 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/api/test_lvm.py @@ -883,3 +883,15 @@ class TestGetSingleLV(object): assert isinstance(lv_, api.Volume) assert lv_.name == 'lv1' + + +class TestHelpers: + def test_get_lv_path_from_mapper(self): + mapper = '/dev/mapper/ceph--c1a97e46--234c--46aa--a549--3ca1d1f356a9-osd--block--32e8e896--172e--4a38--a06a--3702598510ec' + lv_path = api.get_lv_path_from_mapper(mapper) + assert lv_path == '/dev/ceph-c1a97e46-234c-46aa-a549-3ca1d1f356a9/osd-block-32e8e896-172e-4a38-a06a-3702598510ec' + + def test_get_mapper_from_lv_path(self): + lv_path = '/dev/ceph-c1a97e46-234c-46aa-a549-3ca1d1f356a9/osd-block-32e8e896-172e-4a38-a06a-3702598510ec' + mapper = api.get_mapper_from_lv_path(lv_path) + assert mapper == '/dev/mapper/ceph--c1a97e46--234c--46aa--a549--3ca1d1f356a9/osd--block--32e8e896--172e--4a38--a06a/3702598510ec' diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py index 4b8304ce6..044fd3519 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py @@ -56,4 +56,4 @@ class TestDeactivate(object): p_get_lvs.return_value = [FooVolume] deactivate.deactivate_osd(0) - p_dm_close.assert_called_with('123') + p_dm_close.assert_called_with(mapping='123', skip_path_check=True) diff --git a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py index 4c86d0ca1..7e516f3d2 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py @@ -5,6 +5,7 @@ from ceph_volume.api import lvm as api from ceph_volume.devices.lvm import migrate from ceph_volume.util.device import Device from ceph_volume.util import system +from ceph_volume.util import encryption as encryption_utils class TestGetClusterName(object): @@ -520,6 +521,9 @@ class TestNew(object): def mock_get_lvs(self, *args, **kwargs): return self.mock_volumes.pop(0) + def mock_prepare_dmcrypt(self, *args, **kwargs): + return '/dev/mapper/' + kwargs['mapping'] + def test_newdb_non_root(self): with pytest.raises(Exception) as error: migrate.NewDB(argv=[ @@ -990,6 +994,74 @@ class TestNew(object): '--dev-target', '/dev/VolGroup/target_volume', '--command', 'bluefs-bdev-new-wal'] + @patch('os.getuid') + def test_newwal_encrypted(self, m_getuid, monkeypatch, capsys): + m_getuid.return_value = 0 + + source_tags = \ + 'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,ceph.encrypted=1' + + data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg', + lv_path='/dev/VolGroup/lv1', lv_tags=source_tags) + + self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol} + + monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='target_uuid', vg_name='vg', + lv_path='/dev/VolGroup/target_volume', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: False) + + #find_associated_devices will call get_lvs() 4 times 
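
The two helpers exercised in the new TestHelpers class rely on device-mapper's naming convention: the VG and LV names are joined with a single '-' and every literal '-' inside either name is doubled. A hedged, standalone sketch of that round-trip follows (it is not the ceph_volume.api.lvm implementation, only the naming rule it encodes):

    import re

    def lv_path_from_mapper(mapper: str) -> str:
        """/dev/mapper/vg--name-lv--name -> /dev/vg-name/lv-name (sketch)."""
        name = mapper[len('/dev/mapper/'):]
        # a single '-' separates VG from LV; '--' is an escaped literal dash
        vg, lv = re.split(r'(?<!-)-(?!-)', name, maxsplit=1)
        return '/dev/{}/{}'.format(vg.replace('--', '-'), lv.replace('--', '-'))

    def mapper_from_lv_path(lv_path: str) -> str:
        """/dev/vg-name/lv-name -> /dev/mapper/vg--name-lv--name (sketch)."""
        _, vg, lv = lv_path.split('/')[1:]   # drop the leading '' and 'dev'
        return '/dev/mapper/{}-{}'.format(vg.replace('-', '--'), lv.replace('-', '--'))

    if __name__ == '__main__':
        p = ('/dev/ceph-c1a97e46-234c-46aa-a549-3ca1d1f356a9/'
             'osd-block-32e8e896-172e-4a38-a06a-3702598510ec')
        assert lv_path_from_mapper(mapper_from_lv_path(p)) == p
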
+ # and this needs results to be arranged that way + self.mock_volumes = [] + self.mock_volumes.append([data_vol]) + self.mock_volumes.append([data_vol]) + self.mock_volumes.append([]) + self.mock_volumes.append([]) + + monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs) + + monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster') + monkeypatch.setattr(system, 'chown', lambda path: 0) + + monkeypatch.setattr(encryption_utils, 'prepare_dmcrypt', self.mock_prepare_dmcrypt) + + migrate.NewWAL(argv=[ + '--osd-id', '2', + '--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D', + '--target', 'vgname/new_wal']).main() + + n = len(self.mock_process_input) + assert n >= 3 + + assert self.mock_process_input[n - 3] == [ + 'lvchange', + '--addtag', 'ceph.wal_uuid=target_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/lv1'] + + assert self.mock_process_input[n - 2].sort() == [ + 'lvchange', + '--addtag', 'ceph.osd_id=0', + '--addtag', 'ceph.type=wal', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.wal_uuid=target_uuid', + '--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume', + '/dev/VolGroup/target_volume'].sort() + + assert self.mock_process_input[n - 1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/cluster-2', + '--dev-target', '/dev/mapper/target_uuid', + '--command', 'bluefs-bdev-new-wal'] + class TestMigrate(object): def test_invalid_osd_id_passed(self, is_root): @@ -1014,6 +1086,15 @@ class TestMigrate(object): def mock_get_lvs(self, *args, **kwargs): return self.mock_volumes.pop(0) + mock_prepare_dmcrypt_uuid = '' + def mock_prepare_dmcrypt(self, *args, **kwargs): + self.mock_prepare_dmcrypt_uuid = kwargs['mapping'] + return '/dev/mapper/' + kwargs['mapping'] + + mock_dmcrypt_close_uuid = [] + def mock_dmcrypt_close(self, *args, **kwargs): + self.mock_dmcrypt_close_uuid.append(kwargs['mapping']) + def test_get_source_devices(self, monkeypatch): source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234' @@ -1245,6 +1326,120 @@ Example calls for supported scenarios: '--devs-source', '/var/lib/ceph/osd/ceph-2/block', '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db'] + @patch('os.getuid') + def test_migrate_data_db_to_new_db_encrypted(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,'\ + 'ceph.encrypted=1' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,'\ + 'ceph.encrypted=1' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + } + monkeypatch.setattr(migrate.api, 'get_single_lv', + self.mock_get_single_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) 
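
These encrypted-migration tests all follow one pattern: the cryptsetup-facing helpers are replaced with recorders so the test can assert which mapping was prepared or closed without touching real devices. A reduced, hypothetical pytest sketch of that pattern is below; FakeEncryption and migrate_target are placeholders standing in for encryption_utils and the migrate code, not the real modules.

    # test sketch: record calls to an encryption helper instead of running cryptsetup
    class FakeEncryption:
        """Stands in for ceph_volume.util.encryption in this sketch."""
        @staticmethod
        def prepare_dmcrypt(key, device, mapping):
            return '/dev/mapper/%s' % mapping

    def migrate_target(encryption, key, device, mapping):
        # placeholder for the code under test: open dm-crypt and use the mapper path
        return encryption.prepare_dmcrypt(key, device, mapping)

    def test_prepare_dmcrypt_is_recorded(monkeypatch):
        calls = []

        def recorder(key, device, mapping):
            calls.append(mapping)
            return '/dev/mapper/' + mapping

        monkeypatch.setattr(FakeEncryption, 'prepare_dmcrypt', recorder)
        path = migrate_target(FakeEncryption, key='k',
                              device='/dev/vg/lv', mapping='new-db-uuid')
        assert path == '/dev/mapper/new-db-uuid'
        assert calls == ['new-db-uuid']
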
+ + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + monkeypatch.setattr(encryption_utils, 'prepare_dmcrypt', self.mock_prepare_dmcrypt) + monkeypatch.setattr(encryption_utils, 'dmcrypt_close', self.mock_dmcrypt_close) + + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 'db', 'wal', + '--target', 'vgname/new_wal']) + m.main() + + assert self.mock_prepare_dmcrypt_uuid == self.mock_volume.lv_uuid + + n = len(self.mock_dmcrypt_close_uuid) + assert n >= 1 + assert self.mock_dmcrypt_close_uuid[n-1] == db_vol.lv_uuid + + n = len(self.mock_process_input) + assert n >= 5 + + assert self. mock_process_input[n-5] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.encrypted=1', + '/dev/VolGroup/lv2'] + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-3] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.encrypted=1', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'] + + assert self. 
mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/mapper/new-db-uuid', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db'] + def test_migrate_data_db_to_new_db_active_systemd(self, is_root, monkeypatch, capsys): source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev' @@ -1653,6 +1848,147 @@ Example calls for supported scenarios: '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db', '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'] + @patch('os.getuid') + def test_migrate_data_db_wal_to_new_db_encrypted(self, m_getuid, monkeypatch): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev,ceph.encrypted=1' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.encrypted=1' + source_wal_tags = 'ceph.osd_id=0,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev,ceph.encrypted=1' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_single_lv', + self.mock_get_single_lv) + + self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2_new', + lv_tags='') + monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + monkeypatch.setattr(encryption_utils, 'prepare_dmcrypt', self.mock_prepare_dmcrypt) + monkeypatch.setattr(encryption_utils, 'dmcrypt_close', self.mock_dmcrypt_close) + + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'data', 'db', 'wal', + '--target', 'vgname/new_wal']) + m.main() + + assert self.mock_prepare_dmcrypt_uuid == self.mock_volume.lv_uuid + + n = len(self.mock_dmcrypt_close_uuid) + assert n >= 2 + assert self.mock_dmcrypt_close_uuid[n-2] == db_vol.lv_uuid + assert self.mock_dmcrypt_close_uuid[n-1] == wal_vol.lv_uuid + + n = len(self.mock_process_input) + assert n >= 6 + + assert self. 
mock_process_input[n-6] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=db', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.encrypted=1', + '/dev/VolGroup/lv2'] + + assert self. mock_process_input[n-5] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=0', + '--deltag', 'ceph.type=wal', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '--deltag', 'ceph.encrypted=1', + '/dev/VolGroup/lv3'] + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-3] == [ + 'lvchange', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv1'] + + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--addtag', 'ceph.osd_id=2', + '--addtag', 'ceph.type=db', + '--addtag', 'ceph.osd_fsid=1234', + '--addtag', 'ceph.cluster_name=ceph', + '--addtag', 'ceph.encrypted=1', + '--addtag', 'ceph.db_uuid=new-db-uuid', + '--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new', + '/dev/VolGroup/lv2_new'] + + assert self. mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/dev/mapper/new-db-uuid', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.db', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'] + @patch('os.getuid') def test_dont_migrate_data_db_wal_to_new_data(self, m_getuid, @@ -2129,6 +2465,120 @@ Example calls for supported scenarios: '--devs-source', '/var/lib/ceph/osd/ceph-2/block', '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'] + @patch('os.getuid') + def test_migrate_data_wal_to_db_encrypted(self, + m_getuid, + monkeypatch, + capsys): + m_getuid.return_value = 0 + + source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev,ceph.encrypted=1' + source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev,ceph.encrypted=1' + source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \ + 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ + 'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev,ceph.encrypted=1' + + data_vol = api.Volume(lv_name='volume1', + lv_uuid='datauuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv1', + lv_tags=source_tags) + db_vol = api.Volume(lv_name='volume2', + lv_uuid='dbuuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv2', + lv_tags=source_db_tags) + + wal_vol = api.Volume(lv_name='volume3', + lv_uuid='waluuid', + vg_name='vg', + lv_path='/dev/VolGroup/lv3', + lv_tags=source_wal_tags) + + self.mock_single_volumes = { + '/dev/VolGroup/lv1': data_vol, + '/dev/VolGroup/lv2': db_vol, + '/dev/VolGroup/lv3': wal_vol, + } + monkeypatch.setattr(migrate.api, 'get_single_lv', + self.mock_get_single_lv) + + self.mock_volume = db_vol + 
monkeypatch.setattr(api, 'get_lv_by_fullname', + self.mock_get_lv_by_fullname) + + self.mock_process_input = [] + monkeypatch.setattr(process, 'call', self.mock_process) + + devices = [] + devices.append([Device('/dev/VolGroup/lv1'), 'block']) + devices.append([Device('/dev/VolGroup/lv2'), 'db']) + devices.append([Device('/dev/VolGroup/lv3'), 'wal']) + + monkeypatch.setattr(migrate, 'find_associated_devices', + lambda osd_id, osd_fsid: devices) + + monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", + lambda id: False) + + monkeypatch.setattr(migrate, 'get_cluster_name', + lambda osd_id, osd_fsid: 'ceph') + monkeypatch.setattr(system, 'chown', lambda path: 0) + monkeypatch.setattr(encryption_utils, 'prepare_dmcrypt', self.mock_prepare_dmcrypt) + monkeypatch.setattr(encryption_utils, 'dmcrypt_close', self.mock_dmcrypt_close) + m = migrate.Migrate(argv=[ + '--osd-id', '2', + '--osd-fsid', '1234', + '--from', 'db', 'data', 'wal', + '--target', 'vgname/db']) + + m.main() + + assert self.mock_prepare_dmcrypt_uuid == '' + + n = len(self.mock_dmcrypt_close_uuid) + assert n >= 1 + assert self.mock_dmcrypt_close_uuid[n-1] == wal_vol.lv_uuid + + n = len(self.mock_process_input) + assert n >= 1 + for s in self.mock_process_input: + print(s) + + assert self. mock_process_input[n-4] == [ + 'lvchange', + '--deltag', 'ceph.osd_id=2', + '--deltag', 'ceph.type=wal', + '--deltag', 'ceph.osd_fsid=1234', + '--deltag', 'ceph.cluster_name=ceph', + '--deltag', 'ceph.db_uuid=dbuuid', + '--deltag', 'ceph.db_device=db_dev', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '--deltag', 'ceph.encrypted=1', + '/dev/VolGroup/lv3'] + assert self. mock_process_input[n-3] == [ + 'lvchange', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv1'] + assert self. mock_process_input[n-2] == [ + 'lvchange', + '--deltag', 'ceph.wal_uuid=waluuid', + '--deltag', 'ceph.wal_device=wal_dev', + '/dev/VolGroup/lv2'] + assert self. 
mock_process_input[n-1] == [ + 'ceph-bluestore-tool', + '--path', '/var/lib/ceph/osd/ceph-2', + '--dev-target', '/var/lib/ceph/osd/ceph-2/block.db', + '--command', 'bluefs-bdev-migrate', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block', + '--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal'] + def test_migrate_data_wal_to_db_active_systemd(self, is_root, monkeypatch, capsys): source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \ 'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \ diff --git a/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py index f9e2b76b2..ce1f9466f 100644 --- a/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py +++ b/ceph/src/ceph-volume/ceph_volume/tests/util/test_disk.py @@ -1,7 +1,37 @@ import os import pytest from ceph_volume.util import disk -from mock.mock import patch +from mock.mock import patch, MagicMock + + +class TestFunctions: + @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=False)) + def test_is_device_path_does_not_exist(self): + assert not disk.is_device('/dev/foo') + + @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True)) + def test_is_device_dev_doesnt_startswith_dev(self): + assert not disk.is_device('/foo') + + @patch('ceph_volume.util.disk.allow_loop_devices', MagicMock(return_value=False)) + @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True)) + def test_is_device_loop_not_allowed(self): + assert not disk.is_device('/dev/loop123') + + @patch('ceph_volume.util.disk.lsblk', MagicMock(return_value={'NAME': 'foo', 'TYPE': 'disk'})) + @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True)) + def test_is_device_type_disk(self): + assert disk.is_device('/dev/foo') + + @patch('ceph_volume.util.disk.lsblk', MagicMock(return_value={'NAME': 'foo', 'TYPE': 'mpath'})) + @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True)) + def test_is_device_type_mpath(self): + assert disk.is_device('/dev/foo') + + @patch('ceph_volume.util.disk.lsblk', MagicMock(return_value={'NAME': 'foo1', 'TYPE': 'part'})) + @patch('ceph_volume.util.disk.os.path.exists', MagicMock(return_value=True)) + def test_is_device_type_part(self): + assert not disk.is_device('/dev/foo1') class TestLsblkParser(object): @@ -225,7 +255,6 @@ class TestGetDevices(object): result = disk.get_devices(_sys_block_path=str(tmpdir)) assert result == {} - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_sda_block_is_found(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] @@ -235,7 +264,6 @@ class TestGetDevices(object): assert result[sda_path]['model'] == '' assert result[sda_path]['partitions'] == {} - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_sda_size(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] @@ -244,7 +272,6 @@ class TestGetDevices(object): assert list(result.keys()) == [sda_path] assert result[sda_path]['human_readable_size'] == '512.00 KB' - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_sda_sectorsize_fallsback(self, patched_get_block_devs_sysfs, fake_filesystem): # if no sectorsize, it will use queue/hw_sector_size sda_path = '/dev/sda' @@ -254,7 +281,6 @@ class TestGetDevices(object): 
assert list(result.keys()) == [sda_path] assert result[sda_path]['sectorsize'] == '1024' - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_sda_sectorsize_from_logical_block(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] @@ -262,7 +288,6 @@ class TestGetDevices(object): result = disk.get_devices() assert result[sda_path]['sectorsize'] == '99' - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_sda_sectorsize_does_not_fallback(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] @@ -271,7 +296,6 @@ class TestGetDevices(object): result = disk.get_devices() assert result[sda_path]['sectorsize'] == '99' - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_is_rotational(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']] @@ -279,14 +303,12 @@ class TestGetDevices(object): result = disk.get_devices() assert result[sda_path]['rotational'] == '1' - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_is_ceph_rbd(self, patched_get_block_devs_sysfs, fake_filesystem): rbd_path = '/dev/rbd0' patched_get_block_devs_sysfs.return_value = [[rbd_path, rbd_path, 'disk']] result = disk.get_devices() assert rbd_path not in result - @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False) def test_actuator_device(self, patched_get_block_devs_sysfs, fake_filesystem): sda_path = '/dev/sda' fake_actuator_nb = 2 diff --git a/ceph/src/ceph-volume/ceph_volume/util/device.py b/ceph/src/ceph-volume/ceph_volume/util/device.py index 0a3799ff1..d61222afe 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/device.py +++ b/ceph/src/ceph-volume/ceph_volume/util/device.py @@ -33,20 +33,30 @@ class Devices(object): A container for Device instances with reporting """ - def __init__(self, filter_for_batch=False, with_lsm=False): + def __init__(self, + filter_for_batch=False, + with_lsm=False, + list_all=False): lvs = lvm.get_lvs() lsblk_all = disk.lsblk_all() all_devices_vgs = lvm.get_all_devices_vgs() if not sys_info.devices: sys_info.devices = disk.get_devices() - self.devices = [Device(k, - with_lsm, - lvs=lvs, - lsblk_all=lsblk_all, - all_devices_vgs=all_devices_vgs) for k in - sys_info.devices.keys()] - if filter_for_batch: - self.devices = [d for d in self.devices if d.available_lvm_batch] + self._devices = [Device(k, + with_lsm, + lvs=lvs, + lsblk_all=lsblk_all, + all_devices_vgs=all_devices_vgs) for k in + sys_info.devices.keys()] + self.devices = [] + for device in self._devices: + if filter_for_batch and not device.available_lvm_batch: + continue + if device.is_lv and not list_all: + continue + if device.is_partition and not list_all: + continue + self.devices.append(device) def pretty_report(self): output = [ @@ -490,7 +500,7 @@ class Device(object): @property def is_acceptable_device(self): - return self.is_device or self.is_partition + return self.is_device or self.is_partition or self.is_lv @property def is_encrypted(self): @@ -586,7 +596,6 @@ class Device(object): reasons = [ ('removable', 1, 'removable'), ('ro', 1, 'read-only'), - ('locked', 1, 'locked'), ] rejected = [reason for (k, v, reason) in reasons if self.sys_api.get(k, '') == v] @@ -622,6 +631,8 @@ class Device(object): 
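
The Devices change above keeps building Device objects for everything reported, but only surfaces LVs and partitions when the new --list-all flag is passed (and still honours filter_for_batch). A small sketch of that selection logic, using a simplified stand-in for Device rather than the real class:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class FakeDevice:
        path: str
        is_lv: bool = False
        is_partition: bool = False
        available_lvm_batch: bool = True

    def select_devices(devices: List[FakeDevice],
                       filter_for_batch: bool = False,
                       list_all: bool = False) -> List[FakeDevice]:
        selected = []
        for device in devices:
            if filter_for_batch and not device.available_lvm_batch:
                continue
            if device.is_lv and not list_all:
                continue
            if device.is_partition and not list_all:
                continue
            selected.append(device)
        return selected

    if __name__ == '__main__':
        devs = [FakeDevice('/dev/sda'),
                FakeDevice('/dev/sda1', is_partition=True),
                FakeDevice('/dev/vg/lv', is_lv=True)]
        print([d.path for d in select_devices(devs)])                 # whole disks only
        print([d.path for d in select_devices(devs, list_all=True)])  # everything
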
rejected.append('Has GPT headers') if self.has_partitions: rejected.append('Has partitions') + if self.has_fs: + rejected.append('Has a FileSystem') return rejected def _check_lvm_reject_reasons(self): diff --git a/ceph/src/ceph-volume/ceph_volume/util/disk.py b/ceph/src/ceph-volume/ceph_volume/util/disk.py index a69b0077e..ee061b724 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/disk.py +++ b/ceph/src/ceph-volume/ceph_volume/util/disk.py @@ -359,6 +359,10 @@ def is_device(dev): if not allow_loop_devices(): return False + TYPE = lsblk(dev).get('TYPE') + if TYPE: + return TYPE in ['disk', 'mpath'] + # fallback to stat return _stat_is_device(os.lstat(dev).st_mode) @@ -734,28 +738,6 @@ def is_mapper_device(device_name): return device_name.startswith(('/dev/mapper', '/dev/dm-')) -def is_locked_raw_device(disk_path): - """ - A device can be locked by a third party software like a database. - To detect that case, the device is opened in Read/Write and exclusive mode - """ - open_flags = (os.O_RDWR | os.O_EXCL) - open_mode = 0 - fd = None - - try: - fd = os.open(disk_path, open_flags, open_mode) - except OSError: - return 1 - - try: - os.close(fd) - except OSError: - return 1 - - return 0 - - class AllowLoopDevices(object): allow = False warned = False @@ -829,6 +811,19 @@ def get_block_devs_sysfs(_sys_block_path='/sys/block', _sys_dev_block_path='/sys result.append([name, kname, "part"]) return sorted(result, key=lambda x: x[0]) +def get_partitions(_sys_dev_block_path ='/sys/dev/block'): + devices = os.listdir(_sys_dev_block_path) + result = dict() + for device in devices: + device_path = os.path.join(_sys_dev_block_path, device) + is_partition = get_file_contents(os.path.join(device_path, 'partition')) == "1" + if not is_partition: + continue + + partition_sys_name = os.path.basename(os.readlink(device_path)) + parent_device_sys_name = os.readlink(device_path).split('/')[-2:-1][0] + result[partition_sys_name] = parent_device_sys_name + return result def get_devices(_sys_block_path='/sys/block', device=''): """ @@ -844,17 +839,22 @@ def get_devices(_sys_block_path='/sys/block', device=''): device_facts = {} block_devs = get_block_devs_sysfs(_sys_block_path) + partitions = get_partitions() - block_types = ['disk', 'mpath'] + block_types = ['disk', 'mpath', 'lvm', 'part'] if allow_loop_devices(): block_types.append('loop') for block in block_devs: + if block[2] == 'lvm': + block[1] = lvm.get_lv_path_from_mapper(block[1]) devname = os.path.basename(block[0]) diskname = block[1] if block[2] not in block_types: continue sysdir = os.path.join(_sys_block_path, devname) + if block[2] == 'part': + sysdir = os.path.join(_sys_block_path, partitions[devname], devname) metadata = {} # If the device is ceph rbd it gets excluded @@ -882,11 +882,17 @@ def get_devices(_sys_block_path='/sys/block', device=''): for key, file_ in facts: metadata[key] = get_file_contents(os.path.join(sysdir, file_)) - device_slaves = os.listdir(os.path.join(sysdir, 'slaves')) + if block[2] != 'part': + device_slaves = os.listdir(os.path.join(sysdir, 'slaves')) + metadata['partitions'] = get_partitions_facts(sysdir) + if device_slaves: metadata['device_nodes'] = ','.join(device_slaves) else: - metadata['device_nodes'] = devname + if block[2] == 'part': + metadata['device_nodes'] = partitions[devname] + else: + metadata['device_nodes'] = devname metadata['actuators'] = None if os.path.isdir(sysdir + "/queue/independent_access_ranges/"): @@ -914,7 +920,6 @@ def get_devices(_sys_block_path='/sys/block', device=''): metadata['size'] 
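
The new TYPE short-circuit in is_device() accepts whole disks and multipath devices and rejects partitions before falling back to a stat()-based check. A standalone sketch of the same decision, with the lsblk result injected as a parameter instead of queried from the system:

    import os
    import stat
    from typing import Dict, Optional

    def is_device(dev: str,
                  lsblk_info: Optional[Dict[str, str]] = None,
                  allow_loop: bool = False) -> bool:
        """Sketch of the TYPE-based check: only 'disk' and 'mpath' count as devices."""
        if not os.path.exists(dev):
            return False
        if not dev.startswith('/dev/'):
            return False
        if dev.startswith('/dev/loop') and not allow_loop:
            return False
        dev_type = (lsblk_info or {}).get('TYPE')
        if dev_type:
            return dev_type in ('disk', 'mpath')
        # fallback: ask the kernel whether this is a block device
        return stat.S_ISBLK(os.lstat(dev).st_mode)

    if __name__ == '__main__':
        print(is_device('/dev/null', {'TYPE': 'disk'}))   # True: TYPE wins
        print(is_device('/dev/null', {'TYPE': 'part'}))   # False: partitions are rejected
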
= float(size) * 512 metadata['human_readable_size'] = human_readable_size(metadata['size']) metadata['path'] = diskname - metadata['locked'] = is_locked_raw_device(metadata['path']) metadata['type'] = block[2] device_facts[diskname] = metadata diff --git a/ceph/src/ceph-volume/ceph_volume/util/encryption.py b/ceph/src/ceph-volume/ceph_volume/util/encryption.py index fdb73e1b1..f8aea80b4 100644 --- a/ceph/src/ceph-volume/ceph_volume/util/encryption.py +++ b/ceph/src/ceph-volume/ceph_volume/util/encryption.py @@ -106,20 +106,20 @@ def luks_open(key, device, mapping): process.call(command, stdin=key, terminal_verbose=True, show_command=True) -def dmcrypt_close(mapping): +def dmcrypt_close(mapping, skip_path_check=False): """ Encrypt (close) a device, previously decrypted with cryptsetup - :param mapping: + :param mapping: mapping name or path used to correlate device. + :param skip_path_check: whether we need path presence validation. """ - if not os.path.exists(mapping): + if not skip_path_check and not os.path.exists(mapping): logger.debug('device mapper path does not exist %s' % mapping) logger.debug('will skip cryptsetup removal') return # don't be strict about the remove call, but still warn on the terminal if it fails process.run(['cryptsetup', 'remove', mapping], stop_on_error=False) - def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None): """ Retrieve the dmcrypt (secret) key stored initially on the monitor. The key @@ -273,3 +273,22 @@ def legacy_encrypted(device): metadata['lockbox'] = d.path break return metadata + +def prepare_dmcrypt(key, device, mapping): + """ + Helper for devices that are encrypted. The operations needed for + block, db, wal, or data/journal devices are all the same + """ + if not device: + return '' + # format data device + luks_format( + key, + device + ) + luks_open( + key, + device, + mapping + ) + return '/dev/mapper/%s' % mapping diff --git a/ceph/src/cephadm/CMakeLists.txt b/ceph/src/cephadm/CMakeLists.txt index fdb7c9881..8b969bc33 100644 --- a/ceph/src/cephadm/CMakeLists.txt +++ b/ceph/src/cephadm/CMakeLists.txt @@ -11,7 +11,13 @@ add_custom_command( ${CMAKE_CURRENT_SOURCE_DIR}/cephadm.py ${CMAKE_CURRENT_SOURCE_DIR}/build.py WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMAND ${Python3_EXECUTABLE} build.py ${bin_target_file} + COMMAND ${Python3_EXECUTABLE} build.py + --set-version-var=CEPH_GIT_VER=${CEPH_GIT_VER} + --set-version-var=CEPH_GIT_NICE_VER=${CEPH_GIT_NICE_VER} + --set-version-var=CEPH_RELEASE=${CEPH_RELEASE} + --set-version-var=CEPH_RELEASE_NAME=${CEPH_RELEASE_NAME} + --set-version-var=CEPH_RELEASE_TYPE=${CEPH_RELEASE_TYPE} + ${bin_target_file} ) add_custom_target(cephadm ALL diff --git a/ceph/src/cephadm/build.py b/ceph/src/cephadm/build.py index 39c93ce3b..4264b814f 100755 --- a/ceph/src/cephadm/build.py +++ b/ceph/src/cephadm/build.py @@ -27,6 +27,15 @@ except ImportError: log = logging.getLogger(__name__) +_VALID_VERS_VARS = [ + "CEPH_GIT_VER", + "CEPH_GIT_NICE_VER", + "CEPH_RELEASE", + "CEPH_RELEASE_NAME", + "CEPH_RELEASE_TYPE", +] + + def _reexec(python): """Switch to the selected version of python by exec'ing into the desired python path. @@ -45,7 +54,7 @@ def _did_rexec(): return bool(os.environ.get("_BUILD_PYTHON_SET", "")) -def _build(dest, src): +def _build(dest, src, versioning_vars=None): """Build the binary.""" os.chdir(src) tempdir = pathlib.Path(tempfile.mkdtemp(suffix=".cephadm.build")) @@ -61,6 +70,8 @@ def _build(dest, src): # dir to be zipped. 
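
With the encryption.py change above, raw/prepare.py and the migrate code share one prepare_dmcrypt() helper: format the device with LUKS, open it under a predictable mapping name, and hand back the /dev/mapper path. A hedged sketch of that flow using plain subprocess calls to the cryptsetup CLI follows; the real helper goes through ceph-volume's process wrapper, feeds the key on stdin with additional options, and the mapping-name convention shown mirrors raw/prepare.py.

    import subprocess

    def dmcrypt_mapping_name(fsid: str, kname: str, device_type: str) -> str:
        # same naming convention as raw/prepare.py: ceph-<fsid>-<kname>-<type>-dmcrypt
        return 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type)

    def prepare_dmcrypt(key: bytes, device: str, mapping: str) -> str:
        """Format `device` with LUKS, open it as `mapping`, return the mapper path.

        Destructive sketch: do not point this at a device you care about.
        """
        if not device:
            return ''
        subprocess.run(['cryptsetup', '--batch-mode', '--key-file', '-',
                        'luksFormat', device], input=key, check=True)
        subprocess.run(['cryptsetup', '--key-file', '-',
                        'luksOpen', device, mapping], input=key, check=True)
        return '/dev/mapper/{}'.format(mapping)
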
For now we just have a simple call to copy # (and rename) the one file we care about. shutil.copy("cephadm.py", tempdir / "__main__.py") + if versioning_vars: + generate_version_file(versioning_vars, tempdir / "_version.py") _compile(dest, tempdir) finally: shutil.rmtree(tempdir) @@ -115,6 +126,24 @@ def _install_deps(tempdir): ) +def generate_version_file(versioning_vars, dest): + log.info("Generating version file") + log.debug("versioning_vars=%r", versioning_vars) + with open(dest, "w") as fh: + print("# GENERATED FILE -- do not edit", file=fh) + for key, value in versioning_vars: + print(f"{key} = {value!r}", file=fh) + + +def version_kv_pair(value): + if "=" not in value: + raise argparse.ArgumentTypeError(f"not a key=value pair: {value!r}") + key, value = value.split("=", 1) + if key not in _VALID_VERS_VARS: + raise argparse.ArgumentTypeError(f"Unexpected key: {key!r}") + return key, value + + def main(): handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter("cephadm/build.py: %(message)s")) @@ -132,6 +161,14 @@ def main(): parser.add_argument( "--python", help="The path to the desired version of python" ) + parser.add_argument( + "--set-version-var", + "-S", + type=version_kv_pair, + dest="version_vars", + action="append", + help="Set a key=value pair in the generated version info file", + ) args = parser.parse_args() if not _did_rexec() and args.python: @@ -160,7 +197,7 @@ def main(): dest = pathlib.Path(args.dest).absolute() log.info("Source Dir: %s", source) log.info("Destination Path: %s", dest) - _build(dest, source) + _build(dest, source, versioning_vars=args.version_vars) if __name__ == "__main__": diff --git a/ceph/src/cephadm/cephadm.py b/ceph/src/cephadm/cephadm.py index 593a08f00..bcb82c4c4 100755 --- a/ceph/src/cephadm/cephadm.py +++ b/ceph/src/cephadm/cephadm.py @@ -26,13 +26,13 @@ import errno import struct import ssl from enum import Enum -from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO, Sequence, TypeVar, cast, Set, Iterable, TextIO +from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO, Sequence, TypeVar, cast, Set, Iterable, TextIO, Generator import re import uuid from configparser import ConfigParser -from contextlib import redirect_stdout +from contextlib import redirect_stdout, contextmanager from functools import wraps from glob import glob from io import StringIO @@ -55,6 +55,7 @@ DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.25.0' DEFAULT_GRAFANA_IMAGE = 'quay.io/ceph/ceph-grafana:9.4.7' DEFAULT_HAPROXY_IMAGE = 'quay.io/ceph/haproxy:2.3' DEFAULT_KEEPALIVED_IMAGE = 'quay.io/ceph/keepalived:2.2.4' +DEFAULT_NVMEOF_IMAGE = 'quay.io/ceph/nvmeof:0.0.1' DEFAULT_SNMP_GATEWAY_IMAGE = 'docker.io/maxwo/snmp-notifier:v1.2.1' DEFAULT_ELASTICSEARCH_IMAGE = 'quay.io/omrizeneva/elasticsearch:6.8.23' DEFAULT_JAEGER_COLLECTOR_IMAGE = 'quay.io/jaegertracing/jaeger-collector:1.29' @@ -79,6 +80,7 @@ CEPH_DEFAULT_KEYRING = f'/etc/ceph/{CEPH_KEYRING}' CEPH_DEFAULT_PUBKEY = f'/etc/ceph/{CEPH_PUBKEY}' LOG_DIR_MODE = 0o770 DATA_DIR_MODE = 0o700 +DEFAULT_MODE = 0o600 CONTAINER_INIT = True MIN_PODMAN_VERSION = (2, 0, 2) CGROUPS_SPLIT_PODMAN_VERSION = (2, 1, 0) @@ -88,6 +90,7 @@ DEFAULT_TIMEOUT = None # in seconds DEFAULT_RETRY = 15 DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ' QUIET_LOG_LEVEL = 9 # DEBUG is 10, so using 9 to be lower level than DEBUG +NO_DEPRECATED = False logger: logging.Logger = None # type: ignore @@ -169,6 +172,17 @@ class ContainerInfo: and self.version == 
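
Putting the build.py pieces together: CMake passes one --set-version-var KEY=VALUE flag per version variable, version_kv_pair() validates each pair against the whitelist, and generate_version_file() writes them into a _version.py baked into the zipapp. A tiny end-to-end sketch of that path; the whitelist values are taken from the diff, while the output location here is illustrative.

    import argparse
    import pathlib
    import tempfile

    ALLOWED = {'CEPH_GIT_VER', 'CEPH_GIT_NICE_VER', 'CEPH_RELEASE',
               'CEPH_RELEASE_NAME', 'CEPH_RELEASE_TYPE'}

    def kv_pair(value: str):
        if '=' not in value:
            raise argparse.ArgumentTypeError(f'not a key=value pair: {value!r}')
        key, val = value.split('=', 1)
        if key not in ALLOWED:
            raise argparse.ArgumentTypeError(f'unexpected key: {key!r}')
        return key, val

    def write_version_module(pairs, dest: pathlib.Path) -> None:
        with open(dest, 'w') as fh:
            print('# GENERATED FILE -- do not edit', file=fh)
            for key, val in pairs:
                print(f'{key} = {val!r}', file=fh)

    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        parser.add_argument('-S', '--set-version-var', type=kv_pair,
                            dest='version_vars', action='append', default=[])
        args = parser.parse_args(['-S', 'CEPH_RELEASE=18', '-S', 'CEPH_RELEASE_NAME=reef'])
        out = pathlib.Path(tempfile.mkdtemp()) / '_version.py'
        write_version_module(args.version_vars, out)
        print(out.read_text())
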
other.version) +class DeploymentType(Enum): + # Fresh deployment of a daemon. + DEFAULT = 'Deploy' + # Redeploying a daemon. Works the same as fresh + # deployment minus port checking. + REDEPLOY = 'Redeploy' + # Reconfiguring a daemon. Rewrites config + # files and potentially restarts daemon. + RECONFIG = 'Reconfig' + + class BaseConfig: def __init__(self) -> None: @@ -363,6 +377,10 @@ class Error(Exception): pass +class ClusterAlreadyExists(Exception): + pass + + class TimeoutExpired(Error): pass @@ -376,7 +394,7 @@ class UnauthorizedRegistryError(Error): class Ceph(object): daemons = ('mon', 'mgr', 'osd', 'mds', 'rgw', 'rbd-mirror', 'crash', 'cephfs-mirror', 'ceph-exporter') - gateways = ('iscsi', 'nfs') + gateways = ('iscsi', 'nfs', 'nvmeof') ################################## @@ -432,9 +450,9 @@ class SNMPGateway: @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'SNMPGateway': - assert ctx.config_json - return cls(ctx, fsid, daemon_id, - get_parm(ctx.config_json), ctx.image) + cfgs = fetch_configs(ctx) + assert cfgs # assert some config data was found + return cls(ctx, fsid, daemon_id, cfgs, ctx.image) @staticmethod def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]: @@ -467,13 +485,10 @@ class SNMPGateway: @property def port(self) -> int: - if not self.ctx.tcp_ports: + endpoints = fetch_tcp_ports(self.ctx) + if not endpoints: return self.DEFAULT_PORT - else: - if len(self.ctx.tcp_ports) > 0: - return int(self.ctx.tcp_ports.split()[0]) - else: - return self.DEFAULT_PORT + return endpoints[0].port def get_daemon_args(self) -> List[str]: v3_args = [] @@ -511,7 +526,7 @@ class SNMPGateway: def create_daemon_conf(self) -> None: """Creates the environment file holding 'secrets' passed to the snmp-notifier daemon""" - with open(os.open(self.conf_file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with write_new(self.conf_file_path) as f: if self.snmp_version == 'V2c': f.write(f'SNMP_NOTIFIER_COMMUNITY={self.snmp_community}\n') else: @@ -657,6 +672,42 @@ class Monitoring(object): ################################## +@contextmanager +def write_new( + destination: Union[str, Path], + *, + owner: Optional[Tuple[int, int]] = None, + perms: Optional[int] = DEFAULT_MODE, + encoding: Optional[str] = None, +) -> Generator[IO, None, None]: + """Write a new file in a robust manner, optionally specifying the owner, + permissions, or encoding. This function takes care to never leave a file in + a partially-written state due to a crash or power outage by writing to + temporary file and then renaming that temp file over to the final + destination once all data is written. Note that the temporary files can be + leaked but only for a "crash" or power outage - regular exceptions will + clean up the temporary file. 
+ """ + destination = os.path.abspath(destination) + tempname = f'{destination}.new' + open_kwargs: Dict[str, Any] = {} + if encoding: + open_kwargs['encoding'] = encoding + try: + with open(tempname, 'w', **open_kwargs) as fh: + yield fh + fh.flush() + os.fsync(fh.fileno()) + if owner is not None: + os.fchown(fh.fileno(), *owner) + if perms is not None: + os.fchmod(fh.fileno(), perms) + except Exception: + os.unlink(tempname) + raise + os.rename(tempname, destination) + + def populate_files(config_dir, config_files, uid, gid): # type: (str, Dict, int, int) -> None """create config files for different services""" @@ -664,9 +715,7 @@ def populate_files(config_dir, config_files, uid, gid): config_file = os.path.join(config_dir, fname) config_content = dict_get_join(config_files, fname) logger.info('Write file: %s' % (config_file)) - with open(config_file, 'w', encoding='utf-8') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(config_file, owner=(uid, gid), encoding='utf-8') as f: f.write(config_content) @@ -709,7 +758,7 @@ class NFSGanesha(object): @classmethod def init(cls, ctx, fsid, daemon_id): # type: (CephadmContext, str, Union[int, str]) -> NFSGanesha - return cls(ctx, fsid, daemon_id, get_parm(ctx.config_json), ctx.image) + return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) def get_container_mounts(self, data_dir): # type: (str) -> Dict[str, str] @@ -801,9 +850,7 @@ class NFSGanesha(object): # write the RGW keyring if self.rgw: keyring_path = os.path.join(data_dir, 'keyring.rgw') - with open(keyring_path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - os.fchown(f.fileno(), uid, gid) + with write_new(keyring_path, owner=(uid, gid)) as f: f.write(self.rgw.get('keyring', '')) ################################## @@ -839,7 +886,7 @@ class CephIscsi(object): def init(cls, ctx, fsid, daemon_id): # type: (CephadmContext, str, Union[int, str]) -> CephIscsi return cls(ctx, fsid, daemon_id, - get_parm(ctx.config_json), ctx.image) + fetch_configs(ctx), ctx.image) @staticmethod def get_container_mounts(data_dir, log_dir): @@ -849,6 +896,7 @@ class CephIscsi(object): mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z' mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' + mounts[os.path.join(data_dir, 'tcmu-runner-entrypoint.sh')] = '/usr/local/scripts/tcmu-runner-entrypoint.sh' mounts[log_dir] = '/var/log:z' mounts['/dev'] = '/dev' return mounts @@ -870,7 +918,8 @@ class CephIscsi(object): version = None out, err, code = call(ctx, [ctx.container_engine.path, 'exec', container_id, - '/usr/bin/python3', '-c', "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], + '/usr/bin/python3', '-c', + "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], verbosity=CallVerbosity.QUIET) if code == 0: version = out.strip() @@ -912,9 +961,19 @@ class CephIscsi(object): configfs_dir = os.path.join(data_dir, 'configfs') makedirs(configfs_dir, uid, gid, 0o755) + # set up the tcmu-runner entrypoint script + # to be mounted into the container. 
For more info + # on why we need this script, see the + # tcmu_runner_entrypoint_script function + self.files['tcmu-runner-entrypoint.sh'] = self.tcmu_runner_entrypoint_script() + # populate files from the config-json populate_files(data_dir, self.files, uid, gid) + # we want the tcmu runner entrypoint script to be executable + # populate_files will give it 0o600 by default + os.chmod(os.path.join(data_dir, 'tcmu-runner-entrypoint.sh'), 0o700) + @staticmethod def configfs_mount_umount(data_dir, mount=True): # type: (str, bool) -> List[str] @@ -927,16 +986,182 @@ class CephIscsi(object): 'umount {0}; fi'.format(mount_path) return cmd.split() + @staticmethod + def tcmu_runner_entrypoint_script() -> str: + # since we are having tcmu-runner be a background + # process in its systemd unit (rbd-target-api being + # the main process) systemd will not restart it when + # it fails. in order to try and get around that for now + # we can have a script mounted in the container that + # that attempts to do the restarting for us. This script + # can then become the entrypoint for the tcmu-runner + # container + + # This is intended to be dropped for a better solution + # for at least the squid release onward + return """#!/bin/bash +RUN_DIR=/var/run/tcmu-runner + +if [ ! -d "${RUN_DIR}" ] ; then + mkdir -p "${RUN_DIR}" +fi + +rm -rf "${RUN_DIR}"/* + +while true +do + touch "${RUN_DIR}"/start-up-$(date -Ins) + /usr/bin/tcmu-runner + + # If we got around 3 kills/segfaults in the last minute, + # don't start anymore + if [ $(find "${RUN_DIR}" -type f -cmin -1 | wc -l) -ge 3 ] ; then + exit 0 + fi + + sleep 1 +done +""" + def get_tcmu_runner_container(self): # type: () -> CephContainer # daemon_id, is used to generated the cid and pid files used by podman but as both tcmu-runner # and rbd-target-api have the same daemon_id, it conflits and prevent the second container from # starting. .tcmu runner is appended to the daemon_id to fix that. - tcmu_container = get_container(self.ctx, self.fsid, self.daemon_type, str(self.daemon_id) + '.tcmu') - tcmu_container.entrypoint = '/usr/bin/tcmu-runner' + tcmu_container = get_deployment_container(self.ctx, self.fsid, self.daemon_type, str(self.daemon_id) + '.tcmu') + # TODO: Eventually we don't want to run tcmu-runner through this script. 
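
The entrypoint script above works around systemd not restarting a non-main process: it drops a marker file per start-up, relaunches tcmu-runner in a loop, and gives up once it sees about three restarts within a minute. The same rate-limited supervision idea, sketched in Python with a placeholder for the supervised command and an illustrative run directory:

    import pathlib
    import subprocess
    import sys
    import time

    RUN_DIR = pathlib.Path('/var/run/tcmu-runner')   # marker directory; illustrative
    MAX_STARTS_PER_MINUTE = 3

    def recent_starts(run_dir: pathlib.Path, window: float = 60.0) -> int:
        now = time.time()
        return sum(1 for p in run_dir.iterdir()
                   if p.is_file() and now - p.stat().st_mtime < window)

    def supervise(cmd) -> None:
        RUN_DIR.mkdir(parents=True, exist_ok=True)
        while True:
            (RUN_DIR / f'start-up-{time.time()}').touch()
            subprocess.run(cmd)                      # e.g. ['/usr/bin/tcmu-runner']
            if recent_starts(RUN_DIR) >= MAX_STARTS_PER_MINUTE:
                sys.exit(0)                          # crash-looping: stop trying
            time.sleep(1)
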
+ # This is intended to be a workaround backported to older releases + # and should eventually be removed in at least squid onward + tcmu_container.entrypoint = '/usr/local/scripts/tcmu-runner-entrypoint.sh' tcmu_container.cname = self.get_container_name(desc='tcmu') return tcmu_container + +################################## + + +class CephNvmeof(object): + """Defines a Ceph-Nvmeof container""" + + daemon_type = 'nvmeof' + required_files = ['ceph-nvmeof.conf'] + default_image = DEFAULT_NVMEOF_IMAGE + + def __init__(self, + ctx, + fsid, + daemon_id, + config_json, + image=DEFAULT_NVMEOF_IMAGE): + # type: (CephadmContext, str, Union[int, str], Dict, str) -> None + self.ctx = ctx + self.fsid = fsid + self.daemon_id = daemon_id + self.image = image + + # config-json options + self.files = dict_get(config_json, 'files', {}) + + # validate the supplied args + self.validate() + + @classmethod + def init(cls, ctx, fsid, daemon_id): + # type: (CephadmContext, str, Union[int, str]) -> CephNvmeof + return cls(ctx, fsid, daemon_id, + fetch_configs(ctx), ctx.image) + + @staticmethod + def get_container_mounts(data_dir: str) -> Dict[str, str]: + mounts = dict() + mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' + mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' + mounts[os.path.join(data_dir, 'ceph-nvmeof.conf')] = '/src/ceph-nvmeof.conf:z' + mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' + mounts['/dev/hugepages'] = '/dev/hugepages' + mounts['/dev/vfio/vfio'] = '/dev/vfio/vfio' + return mounts + + @staticmethod + def get_container_binds(): + # type: () -> List[List[str]] + binds = [] + lib_modules = ['type=bind', + 'source=/lib/modules', + 'destination=/lib/modules', + 'ro=true'] + binds.append(lib_modules) + return binds + + @staticmethod + def get_version(ctx: CephadmContext, container_id: str) -> Optional[str]: + out, err, ret = call(ctx, + [ctx.container_engine.path, 'inspect', + '--format', '{{index .Config.Labels "io.ceph.version"}}', + ctx.image]) + version = None + if ret == 0: + version = out.strip() + return version + + def validate(self): + # type: () -> None + if not is_fsid(self.fsid): + raise Error('not an fsid: %s' % self.fsid) + if not self.daemon_id: + raise Error('invalid daemon_id: %s' % self.daemon_id) + if not self.image: + raise Error('invalid image: %s' % self.image) + + # check for the required files + if self.required_files: + for fname in self.required_files: + if fname not in self.files: + raise Error('required file missing from config-json: %s' % fname) + + def get_daemon_name(self): + # type: () -> str + return '%s.%s' % (self.daemon_type, self.daemon_id) + + def get_container_name(self, desc=None): + # type: (Optional[str]) -> str + cname = '%s-%s' % (self.fsid, self.get_daemon_name()) + if desc: + cname = '%s-%s' % (cname, desc) + return cname + + def create_daemon_dirs(self, data_dir, uid, gid): + # type: (str, int, int) -> None + """Create files under the container data dir""" + if not os.path.isdir(data_dir): + raise OSError('data_dir is not a directory: %s' % (data_dir)) + + logger.info('Creating ceph-nvmeof config...') + configfs_dir = os.path.join(data_dir, 'configfs') + makedirs(configfs_dir, uid, gid, 0o755) + + # populate files from the config-json + populate_files(data_dir, self.files, uid, gid) + + @staticmethod + def configfs_mount_umount(data_dir, mount=True): + # type: (str, bool) -> List[str] + mount_path = os.path.join(data_dir, 'configfs') + if mount: + cmd = 'if ! 
grep -qs {0} /proc/mounts; then ' \ + 'mount -t configfs none {0}; fi'.format(mount_path) + else: + cmd = 'if grep -qs {0} /proc/mounts; then ' \ + 'umount {0}; fi'.format(mount_path) + return cmd.split() + + @staticmethod + def get_sysctl_settings() -> List[str]: + return [ + 'vm.nr_hugepages = 4096', + ] + + ################################## @@ -974,7 +1199,7 @@ class CephExporter(object): def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'CephExporter': return cls(ctx, fsid, daemon_id, - get_parm(ctx.config_json), ctx.image) + fetch_configs(ctx), ctx.image) @staticmethod def get_container_mounts() -> Dict[str, str]: @@ -1023,7 +1248,7 @@ class HAproxy(object): @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'HAproxy': - return cls(ctx, fsid, daemon_id, get_parm(ctx.config_json), + return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: @@ -1112,7 +1337,7 @@ class Keepalived(object): def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'Keepalived': return cls(ctx, fsid, daemon_id, - get_parm(ctx.config_json), ctx.image) + fetch_configs(ctx), ctx.image) def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: """Create files under the container data dir""" @@ -1248,7 +1473,7 @@ class CustomContainer(object): def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'CustomContainer': return cls(fsid, daemon_id, - get_parm(ctx.config_json), ctx.image) + fetch_configs(ctx), ctx.image) def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: """ @@ -1269,9 +1494,7 @@ class CustomContainer(object): logger.info('Creating file: {}'.format(file_path)) content = dict_get_join(self.files, file_path) file_path = os.path.join(data_dir, file_path.strip('/')) - with open(file_path, 'w', encoding='utf-8') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f: f.write(content) def get_daemon_args(self) -> List[str]: @@ -1366,7 +1589,7 @@ def dict_get(d: Dict, key: str, default: Any = None, require: bool = False) -> A ################################## -def dict_get_join(d: Dict, key: str) -> Any: +def dict_get_join(d: Dict[str, Any], key: str) -> Any: """ Helper function to get the value of a given key from a dictionary. `List` values will be converted to a string by joining them with a @@ -1390,6 +1613,7 @@ def get_supported_daemons(): supported_daemons.extend(Monitoring.components) supported_daemons.append(NFSGanesha.daemon_type) supported_daemons.append(CephIscsi.daemon_type) + supported_daemons.append(CephNvmeof.daemon_type) supported_daemons.append(CustomContainer.daemon_type) supported_daemons.append(HAproxy.daemon_type) supported_daemons.append(Keepalived.daemon_type) @@ -1417,22 +1641,21 @@ def attempt_bind(ctx, s, address, port): logger.warning(msg) raise PortOccupiedError(msg) else: - raise Error(e) + raise e except Exception as e: raise Error(e) finally: s.close() -def port_in_use(ctx, port_num): - # type: (CephadmContext, int) -> bool +def port_in_use(ctx: CephadmContext, endpoint: EndPoint) -> bool: """Detect whether a port is in use on the local machine - IPv4 and IPv6""" - logger.info('Verifying port %d ...' % port_num) + logger.info('Verifying port %s ...' 
% str(endpoint)) def _port_in_use(af: socket.AddressFamily, address: str) -> bool: try: s = socket.socket(af, socket.SOCK_STREAM) - attempt_bind(ctx, s, address, port_num) + attempt_bind(ctx, s, address, endpoint.port) except PortOccupiedError: return True except OSError as e: @@ -1444,6 +1667,13 @@ def port_in_use(ctx, port_num): else: raise e return False + + if endpoint.ip != '0.0.0.0' and endpoint.ip != '::': + if is_ipv6(endpoint.ip): + return _port_in_use(socket.AF_INET6, endpoint.ip) + else: + return _port_in_use(socket.AF_INET, endpoint.ip) + return any(_port_in_use(af, address) for af, address in ( (socket.AF_INET, '0.0.0.0'), (socket.AF_INET6, '::') @@ -2261,29 +2491,58 @@ def require_image(func: FuncT) -> FuncT: def default_image(func: FuncT) -> FuncT: @wraps(func) def _default_image(ctx: CephadmContext) -> Any: - if not ctx.image: - if 'name' in ctx and ctx.name: - type_ = ctx.name.split('.', 1)[0] - if type_ in Monitoring.components: - ctx.image = Monitoring.components[type_]['image'] - if type_ == 'haproxy': - ctx.image = HAproxy.default_image - if type_ == 'keepalived': - ctx.image = Keepalived.default_image - if type_ == SNMPGateway.daemon_type: - ctx.image = SNMPGateway.default_image - if type_ in Tracing.components: - ctx.image = Tracing.components[type_]['image'] - if not ctx.image: - ctx.image = os.environ.get('CEPHADM_IMAGE') - if not ctx.image: - ctx.image = _get_default_image(ctx) - + update_default_image(ctx) return func(ctx) return cast(FuncT, _default_image) +def update_default_image(ctx: CephadmContext) -> None: + if getattr(ctx, 'image', None): + return + ctx.image = None # ensure ctx.image exists to avoid repeated `getattr`s + name = getattr(ctx, 'name', None) + if name: + type_ = name.split('.', 1)[0] + if type_ in Monitoring.components: + ctx.image = Monitoring.components[type_]['image'] + if type_ == 'haproxy': + ctx.image = HAproxy.default_image + if type_ == 'keepalived': + ctx.image = Keepalived.default_image + if type_ == SNMPGateway.daemon_type: + ctx.image = SNMPGateway.default_image + if type_ == CephNvmeof.daemon_type: + ctx.image = CephNvmeof.default_image + if type_ in Tracing.components: + ctx.image = Tracing.components[type_]['image'] + if not ctx.image: + ctx.image = os.environ.get('CEPHADM_IMAGE') + if not ctx.image: + ctx.image = _get_default_image(ctx) + + +def executes_early(func: FuncT) -> FuncT: + """Decorator that indicates the command function is meant to have no + dependencies and no environmental requirements and can therefore be + executed as non-root and with no logging, etc. Commands that have this + decorator applied must be simple and self-contained. 
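
port_in_use() now takes an EndPoint rather than a bare port number: when a concrete IP was requested it probes only that address family, otherwise it probes both the IPv4 and IPv6 wildcards. EndPoint itself is defined elsewhere in cephadm.py; the sketch below uses a minimal stand-in and treats any bind failure as "occupied", which is cruder than attempt_bind's error handling in the diff.

    import socket
    from dataclasses import dataclass

    @dataclass
    class EndPoint:
        ip: str
        port: int

    def _in_use(family: socket.AddressFamily, address: str, port: int) -> bool:
        try:
            s = socket.socket(family, socket.SOCK_STREAM)
        except OSError:
            return False          # address family not available on this host
        try:
            s.bind((address, port))
            return False
        except OSError:
            return True           # EADDRINUSE and friends: treat as occupied
        finally:
            s.close()

    def port_in_use(endpoint: EndPoint) -> bool:
        if endpoint.ip not in ('0.0.0.0', '::'):
            family = socket.AF_INET6 if ':' in endpoint.ip else socket.AF_INET
            return _in_use(family, endpoint.ip, endpoint.port)
        return any(_in_use(fam, addr, endpoint.port)
                   for fam, addr in ((socket.AF_INET, '0.0.0.0'),
                                     (socket.AF_INET6, '::')))

    if __name__ == '__main__':
        probe = socket.socket()
        probe.bind(('127.0.0.1', 0))            # grab an ephemeral port
        _, busy_port = probe.getsockname()
        print(port_in_use(EndPoint('127.0.0.1', busy_port)))   # True while probe holds it
        probe.close()
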
+ """ + cast(Any, func)._execute_early = True + return func + + +def deprecated_command(func: FuncT) -> FuncT: + @wraps(func) + def _deprecated_command(ctx: CephadmContext) -> Any: + logger.warning(f'Deprecated command used: {func}') + if NO_DEPRECATED: + raise Error('running deprecated commands disabled') + return func(ctx) + + return cast(FuncT, _deprecated_command) + + def get_container_info(ctx: CephadmContext, daemon_filter: str, by_name: bool) -> Optional[ContainerInfo]: """ :param ctx: Cephadm context @@ -2733,15 +2992,15 @@ def get_daemon_args(ctx, fsid, daemon_type, daemon_id): if daemon_type not in ['grafana', 'loki', 'promtail']: ip = '' port = Monitoring.port_map[daemon_type][0] - if 'meta_json' in ctx and ctx.meta_json: - meta = json.loads(ctx.meta_json) or {} + meta = fetch_meta(ctx) + if meta: if 'ip' in meta and meta['ip']: ip = meta['ip'] if 'ports' in meta and meta['ports']: port = meta['ports'][0] r += [f'--web.listen-address={ip}:{port}'] if daemon_type == 'prometheus': - config = get_parm(ctx.config_json) + config = fetch_configs(ctx) retention_time = config.get('retention_time', '15d') retention_size = config.get('retention_size', '0') # default to disabled r += [f'--storage.tsdb.retention.time={retention_time}'] @@ -2757,7 +3016,7 @@ def get_daemon_args(ctx, fsid, daemon_type, daemon_id): host = wrap_ipv6(addr) if addr else host r += [f'--web.external-url={scheme}://{host}:{port}'] if daemon_type == 'alertmanager': - config = get_parm(ctx.config_json) + config = fetch_configs(ctx) peers = config.get('peers', list()) # type: ignore for peer in peers: r += ['--cluster.peer={}'.format(peer)] @@ -2770,15 +3029,15 @@ def get_daemon_args(ctx, fsid, daemon_type, daemon_id): if daemon_type == 'promtail': r += ['--config.expand-env'] if daemon_type == 'prometheus': - config = get_parm(ctx.config_json) + config = fetch_configs(ctx) try: r += [f'--web.config.file={config["web_config"]}'] except KeyError: pass if daemon_type == 'node-exporter': - config = get_parm(ctx.config_json) + config = fetch_configs(ctx) try: - r += [f'--web.config={config["web_config"]}'] + r += [f'--web.config.file={config["web_config"]}'] except KeyError: pass r += ['--path.procfs=/host/proc', @@ -2815,22 +3074,16 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, if config: config_path = os.path.join(data_dir, 'config') - with open(config_path, 'w') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(config_path, owner=(uid, gid)) as f: f.write(config) if keyring: keyring_path = os.path.join(data_dir, 'keyring') - with open(keyring_path, 'w') as f: - os.fchmod(f.fileno(), 0o600) - os.fchown(f.fileno(), uid, gid) + with write_new(keyring_path, owner=(uid, gid)) as f: f.write(keyring) if daemon_type in Monitoring.components.keys(): - config_json: Dict[str, Any] = dict() - if 'config_json' in ctx: - config_json = get_parm(ctx.config_json) + config_json = fetch_configs(ctx) # Set up directories specific to the monitoring component config_dir = '' @@ -2881,14 +3134,15 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, # populate the config directory for the component from the config-json if 'files' in config_json: for fname in config_json['files']: - content = dict_get_join(config_json['files'], fname) + # work around mypy wierdness where it thinks `str`s aren't Anys + # when used for dictionary values! feels like possibly a mypy bug?! 
+ cfg = cast(Dict[str, Any], config_json['files']) + content = dict_get_join(cfg, fname) if os.path.isabs(fname): fpath = os.path.join(data_dir_root, fname.lstrip(os.path.sep)) else: fpath = os.path.join(data_dir_root, config_dir, fname) - with open(fpath, 'w', encoding='utf-8') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(fpath, owner=(uid, gid), encoding='utf-8') as f: f.write(content) elif daemon_type == NFSGanesha.daemon_type: @@ -2899,6 +3153,10 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, ceph_iscsi = CephIscsi.init(ctx, fsid, daemon_id) ceph_iscsi.create_daemon_dirs(data_dir, uid, gid) + elif daemon_type == CephNvmeof.daemon_type: + ceph_nvmeof = CephNvmeof.init(ctx, fsid, daemon_id) + ceph_nvmeof.create_daemon_dirs(data_dir, uid, gid) + elif daemon_type == HAproxy.daemon_type: haproxy = HAproxy.init(ctx, fsid, daemon_id) haproxy.create_daemon_dirs(data_dir, uid, gid) @@ -2920,40 +3178,39 @@ def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, def _write_custom_conf_files(ctx: CephadmContext, daemon_type: str, daemon_id: str, fsid: str, uid: int, gid: int) -> None: # mostly making this its own function to make unit testing easier - if 'config_json' not in ctx or not ctx.config_json: + ccfiles = fetch_custom_config_files(ctx) + if not ccfiles: return - config_json = get_custom_config_files(ctx.config_json) custom_config_dir = os.path.join(ctx.data_dir, fsid, 'custom_config_files', f'{daemon_type}.{daemon_id}') if not os.path.exists(custom_config_dir): makedirs(custom_config_dir, uid, gid, 0o755) mandatory_keys = ['mount_path', 'content'] - for ccf in config_json['custom_config_files']: + for ccf in ccfiles: if all(k in ccf for k in mandatory_keys): file_path = os.path.join(custom_config_dir, os.path.basename(ccf['mount_path'])) - with open(file_path, 'w+', encoding='utf-8') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f: f.write(ccf['content']) + # temporary workaround to make custom config files work for tcmu-runner + # container we deploy with iscsi until iscsi is refactored + if daemon_type == 'iscsi': + tcmu_config_dir = custom_config_dir + '.tcmu' + if not os.path.exists(tcmu_config_dir): + makedirs(tcmu_config_dir, uid, gid, 0o755) + tcmu_file_path = os.path.join(tcmu_config_dir, os.path.basename(ccf['mount_path'])) + with write_new(tcmu_file_path, owner=(uid, gid), encoding='utf-8') as f: + f.write(ccf['content']) def get_parm(option: str) -> Dict[str, str]: js = _get_config_json(option) # custom_config_files is a special field that may be in the config # dict. It is used for mounting custom config files into daemon's containers - # and should be accessed through the "get_custom_config_files" function. + # and should be accessed through the "fetch_custom_config_files" function. # For get_parm we need to discard it. 
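For context on the custom config file handling above: each entry handed to _write_custom_conf_files() is a small dict with two mandatory keys, and entries missing either key are skipped. The shape of the data and where it lands on disk (the path and content below are hypothetical examples):

    custom_config_files = [
        {
            'mount_path': '/etc/example/example.conf',   # hypothetical mount target
            'content': 'key = value\n',
        },
    ]
    # each entry is written to
    #   {data_dir}/{fsid}/custom_config_files/{daemon_type}.{daemon_id}/<basename of mount_path>
    # and, for iscsi daemons only, duplicated into a sibling
    #   .../custom_config_files/iscsi.{daemon_id}.tcmu/
    # directory so the tcmu-runner container deployed alongside iscsi sees the same files.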
js.pop('custom_config_files', None) return js -def get_custom_config_files(option: str) -> Dict[str, List[Dict[str, str]]]: - js = _get_config_json(option) - res: Dict[str, List[Dict[str, str]]] = {'custom_config_files': []} - if 'custom_config_files' in js: - res['custom_config_files'] = js['custom_config_files'] - return res - - def _get_config_json(option: str) -> Dict[str, Any]: if not option: return dict() @@ -2984,13 +3241,94 @@ def _get_config_json(option: str) -> Dict[str, Any]: return js +def fetch_meta(ctx: CephadmContext) -> Dict[str, Any]: + """Return a dict containing metadata about a deployment. + """ + meta = getattr(ctx, 'meta_properties', None) + if meta is not None: + return meta + mjson = getattr(ctx, 'meta_json', None) + if mjson is not None: + meta = json.loads(mjson) or {} + ctx.meta_properties = meta + return meta + return {} + + +def fetch_configs(ctx: CephadmContext) -> Dict[str, str]: + """Return a dict containing arbitrary configuration parameters. + This function filters out the key 'custom_config_files' which + must not be part of a deployment's configuration key-value pairs. + To access custom configuration file data, use `fetch_custom_config_files`. + """ + # ctx.config_blobs is *always* a dict. it is created once when + # a command is parsed/processed and stored "forever" + cfg_blobs = getattr(ctx, 'config_blobs', None) + if cfg_blobs: + cfg_blobs = dict(cfg_blobs) + cfg_blobs.pop('custom_config_files', None) + return cfg_blobs + # ctx.config_json is the legacy equivalent of config_blobs. it is a + # string that either contains json or refers to a file name where + # the file contains json. + cfg_json = getattr(ctx, 'config_json', None) + if cfg_json: + jdata = _get_config_json(cfg_json) or {} + jdata.pop('custom_config_files', None) + return jdata + return {} + + +def fetch_custom_config_files(ctx: CephadmContext) -> List[Dict[str, Any]]: + """Return a list containing dicts that can be used to populate + custom configuration files for containers. + """ + # NOTE: this function works like the opposite of fetch_configs. + # instead of filtering out custom_config_files, it returns only + # the content in that key. 
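A short illustration of the precedence implemented by fetch_meta() and fetch_configs() above: the new ctx.config_blobs / ctx.meta_properties attributes win, and the legacy ctx.config_json / ctx.meta_json strings are only consulted as a fallback. All values below are made up:

    class _Ctx:          # attribute-only stand-in for CephadmContext
        pass

    ctx = _Ctx()
    ctx.meta_json = '{"service_name": "grafana"}'          # legacy string form
    ctx.config_blobs = {
        'config': '[global]\nfsid = ...\n',
        'keyring': '[client.admin]\n\tkey = ...\n',
        'custom_config_files': [{'mount_path': '/etc/x.conf', 'content': 'x\n'}],
    }

    fetch_meta(ctx)      # -> {'service_name': 'grafana'}, parsed once and then
                         #    cached on ctx.meta_properties
    cfg = fetch_configs(ctx)
    'custom_config_files' in cfg   # False: filtered out; use
                                   # fetch_custom_config_files() for those entries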
+ cfg_blobs = getattr(ctx, 'config_blobs', None) + if cfg_blobs: + return cfg_blobs.get('custom_config_files', []) + cfg_json = getattr(ctx, 'config_json', None) + if cfg_json: + jdata = _get_config_json(cfg_json) + return jdata.get('custom_config_files', []) + return [] + + +def fetch_tcp_ports(ctx: CephadmContext) -> List[EndPoint]: + """Return a list of Endpoints, which have a port and ip attribute + """ + ports = getattr(ctx, 'tcp_ports', None) + if ports is None: + ports = [] + if isinstance(ports, str): + ports = list(map(int, ports.split())) + port_ips: Dict[str, str] = {} + port_ips_attr: Union[str, Dict[str, str], None] = getattr(ctx, 'port_ips', None) + if isinstance(port_ips_attr, str): + port_ips = json.loads(port_ips_attr) + elif port_ips_attr is not None: + # if it's not None or a str, assume it's already the dict we want + port_ips = port_ips_attr + + endpoints: List[EndPoint] = [] + for port in ports: + if str(port) in port_ips: + endpoints.append(EndPoint(port_ips[str(port)], port)) + else: + endpoints.append(EndPoint('0.0.0.0', port)) + + return endpoints + + def get_config_and_keyring(ctx): # type: (CephadmContext) -> Tuple[Optional[str], Optional[str]] config = None keyring = None - if 'config_json' in ctx and ctx.config_json: - d = get_parm(ctx.config_json) + d = fetch_configs(ctx) + if d: config = d.get('config') keyring = d.get('keyring') if config and keyring: @@ -3021,6 +3359,8 @@ def get_container_binds(ctx, fsid, daemon_type, daemon_id): if daemon_type == CephIscsi.daemon_type: binds.extend(CephIscsi.get_container_binds()) + if daemon_type == CephNvmeof.daemon_type: + binds.extend(CephNvmeof.get_container_binds()) elif daemon_type == CustomContainer.daemon_type: assert daemon_id cc = CustomContainer.init(ctx, fsid, daemon_id) @@ -3073,10 +3413,14 @@ def get_container_mounts(ctx, fsid, daemon_type, daemon_id, if daemon_type == 'osd': # selinux-policy in the container may not match the host. 
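And the corresponding input/output shape for fetch_tcp_ports() above, which turns the orchestrator's --tcp-ports / --port-ips parameters into EndPoint objects (values illustrative):

    class _Ctx:          # attribute-only stand-in for CephadmContext
        pass

    ctx = _Ctx()
    ctx.tcp_ports = '9100 9101'                   # may also arrive as a list of ints
    ctx.port_ips = '{"9101": "192.168.122.10"}'   # JSON string or an already-parsed dict

    eps = fetch_tcp_ports(ctx)
    [(e.ip, e.port) for e in eps]
    # -> [('0.0.0.0', 9100), ('192.168.122.10', 9101)]
    # ports without an explicit IP fall back to the 0.0.0.0 wildcard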
if HostFacts(ctx).selinux_enabled: - selinux_folder = '/var/lib/ceph/%s/selinux' % fsid - if not os.path.exists(selinux_folder): - os.makedirs(selinux_folder, mode=0o755) - mounts[selinux_folder] = '/sys/fs/selinux:ro' + cluster_dir = f'{ctx.data_dir}/{fsid}' + selinux_folder = f'{cluster_dir}/selinux' + if os.path.exists(cluster_dir): + if not os.path.exists(selinux_folder): + os.makedirs(selinux_folder, mode=0o755) + mounts[selinux_folder] = '/sys/fs/selinux:ro' + else: + logger.error(f'Cluster direcotry {cluster_dir} does not exist.') mounts['/'] = '/rootfs' try: @@ -3133,6 +3477,11 @@ def get_container_mounts(ctx, fsid, daemon_type, daemon_id, data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) mounts.update(HAproxy.get_container_mounts(data_dir)) + if daemon_type == CephNvmeof.daemon_type: + assert daemon_id + data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) + mounts.update(CephNvmeof.get_container_mounts(data_dir)) + if daemon_type == CephIscsi.daemon_type: assert daemon_id data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) @@ -3250,7 +3599,7 @@ def get_container(ctx: CephadmContext, elif daemon_type in Tracing.components: entrypoint = '' name = '%s.%s' % (daemon_type, daemon_id) - config = get_parm(ctx.config_json) + config = fetch_configs(ctx) Tracing.set_configuration(config, daemon_type) envs.extend(Tracing.components[daemon_type].get('envs', [])) elif daemon_type == NFSGanesha.daemon_type: @@ -3267,6 +3616,11 @@ def get_container(ctx: CephadmContext, name = '%s.%s' % (daemon_type, daemon_id) envs.extend(Keepalived.get_container_envs()) container_args.extend(['--cap-add=NET_ADMIN', '--cap-add=NET_RAW']) + elif daemon_type == CephNvmeof.daemon_type: + name = '%s.%s' % (daemon_type, daemon_id) + container_args.extend(['--ulimit', 'memlock=-1:-1']) + container_args.extend(['--ulimit', 'nofile=10240']) + container_args.extend(['--cap-add=SYS_ADMIN', '--cap-add=CAP_SYS_NICE']) elif daemon_type == CephIscsi.daemon_type: entrypoint = CephIscsi.entrypoint name = '%s.%s' % (daemon_type, daemon_id) @@ -3374,26 +3728,29 @@ def extract_uid_gid(ctx, img='', file_path='/var/lib/ceph'): raise RuntimeError('uid/gid not found') -def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, - config=None, keyring=None, - osd_fsid=None, - reconfig=False, - ports=None): - # type: (CephadmContext, str, str, Union[int, str], Optional[CephContainer], int, int, Optional[str], Optional[str], Optional[str], Optional[bool], Optional[List[int]]) -> None - - ports = ports or [] - if any([port_in_use(ctx, port) for port in ports]): - if daemon_type == 'mgr': - # non-fatal for mgr when we are in mgr_standby_modules=false, but we can't - # tell whether that is the case here. 
- logger.warning( - f"ceph-mgr TCP port(s) {','.join(map(str, ports))} already in use" - ) - else: - raise Error("TCP Port(s) '{}' required for {} already in use".format(','.join(map(str, ports)), daemon_type)) +def deploy_daemon(ctx: CephadmContext, fsid: str, daemon_type: str, + daemon_id: Union[int, str], c: Optional['CephContainer'], + uid: int, gid: int, config: Optional[str] = None, + keyring: Optional[str] = None, osd_fsid: Optional[str] = None, + deployment_type: DeploymentType = DeploymentType.DEFAULT, + endpoints: Optional[List[EndPoint]] = None) -> None: + + endpoints = endpoints or [] + # only check port in use if fresh deployment since service + # we are redeploying/reconfiguring will already be using the port + if deployment_type == DeploymentType.DEFAULT: + if any([port_in_use(ctx, e) for e in endpoints]): + if daemon_type == 'mgr': + # non-fatal for mgr when we are in mgr_standby_modules=false, but we can't + # tell whether that is the case here. + logger.warning( + f"ceph-mgr TCP port(s) {','.join(map(str, endpoints))} already in use" + ) + else: + raise Error("TCP Port(s) '{}' required for {} already in use".format(','.join(map(str, endpoints)), daemon_type)) data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) - if reconfig and not os.path.exists(data_dir): + if deployment_type == DeploymentType.RECONFIG and not os.path.exists(data_dir): raise Error('cannot reconfig, data path %s does not exist' % data_dir) if daemon_type == 'mon' and not os.path.exists(data_dir): assert config @@ -3428,9 +3785,7 @@ def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, ).run() # write conf - with open(mon_dir + '/config', 'w') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(mon_dir + '/config', owner=(uid, gid)) as f: f.write(config) else: # dirs, conf, keyring @@ -3440,12 +3795,11 @@ def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, uid, gid, config, keyring) - if not reconfig: + # only write out unit files and start daemon + # with systemd if this is not a reconfig + if deployment_type != DeploymentType.RECONFIG: if daemon_type == CephadmAgent.daemon_type: - if ctx.config_json == '-': - config_js = get_parm('-') - else: - config_js = get_parm(ctx.config_json) + config_js = fetch_configs(ctx) assert isinstance(config_js, dict) cephadm_agent = CephadmAgent(ctx, fsid, daemon_id) @@ -3453,30 +3807,28 @@ def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, else: if c: deploy_daemon_units(ctx, fsid, uid, gid, daemon_type, daemon_id, - c, osd_fsid=osd_fsid, ports=ports) + c, osd_fsid=osd_fsid, endpoints=endpoints) else: raise RuntimeError('attempting to deploy a daemon without a container image') if not os.path.exists(data_dir + '/unit.created'): - with open(data_dir + '/unit.created', 'w') as f: - os.fchmod(f.fileno(), 0o600) - os.fchown(f.fileno(), uid, gid) + with write_new(data_dir + '/unit.created', owner=(uid, gid)) as f: f.write('mtime is time the daemon deployment was created\n') - with open(data_dir + '/unit.configured', 'w') as f: + with write_new(data_dir + '/unit.configured', owner=(uid, gid)) as f: f.write('mtime is time we were last configured\n') - os.fchmod(f.fileno(), 0o600) - os.fchown(f.fileno(), uid, gid) update_firewalld(ctx, daemon_type) # Open ports explicitly required for the daemon - if ports: + if endpoints: fw = Firewalld(ctx) - fw.open_ports(ports + fw.external_ports.get(daemon_type, [])) + fw.open_ports([e.port for e in endpoints] + fw.external_ports.get(daemon_type, 
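deploy_daemon() and the functions below now take a DeploymentType instead of the old reconfig boolean. The enum itself is defined elsewhere in the patch; based on how it is used here (the port check, the unit-file handling, and the '{value} daemon ...' log line in get_deployment_type() further down), it looks roughly like the sketch below, with the member values being an assumption:

    from enum import Enum

    class DeploymentType(Enum):
        # DEFAULT:  fresh deployment; ports are checked, unit files written,
        #           and the daemon is started
        # REDEPLOY: the daemon (or its container) is already running, so the
        #           port-in-use check is skipped
        # RECONFIG: only configuration files are rewritten; unit files are left
        #           alone and non-Ceph daemons are restarted to pick up changes
        DEFAULT = 'Deploy'
        REDEPLOY = 'Redeploy'
        RECONFIG = 'Reconfig'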
[])) fw.apply_rules() - if reconfig and daemon_type not in Ceph.daemons: + # If this was a reconfig and the daemon is not a Ceph daemon, restart it + # so it can pick up potential changes to its configuration files + if deployment_type == DeploymentType.RECONFIG and daemon_type not in Ceph.daemons: # ceph daemons do not need a restart; others (presumably) do to pick # up the new config call_throws(ctx, ['systemctl', 'reset-failed', @@ -3547,7 +3899,7 @@ def deploy_daemon_units( enable: bool = True, start: bool = True, osd_fsid: Optional[str] = None, - ports: Optional[List[int]] = None, + endpoints: Optional[List[EndPoint]] = None, ) -> None: # cmd @@ -3560,8 +3912,10 @@ def deploy_daemon_units( f.write(f'! {container_exists % c.cname} || {" ".join(c.stop_cmd(timeout=timeout))} \n') data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) - with open(data_dir + '/unit.run.new', 'w') as f, \ - open(data_dir + '/unit.meta.new', 'w') as metaf: + run_file_path = data_dir + '/unit.run' + meta_file_path = data_dir + '/unit.meta' + with write_new(run_file_path) as f, write_new(meta_file_path) as metaf: + f.write('set -e\n') if daemon_type in Ceph.daemons: @@ -3625,30 +3979,24 @@ def deploy_daemon_units( _write_container_cmd_to_bash(ctx, f, c, '%s.%s' % (daemon_type, str(daemon_id))) # some metadata about the deploy - meta: Dict[str, Any] = {} - if 'meta_json' in ctx and ctx.meta_json: - meta = json.loads(ctx.meta_json) or {} + meta: Dict[str, Any] = fetch_meta(ctx) meta.update({ 'memory_request': int(ctx.memory_request) if ctx.memory_request else None, 'memory_limit': int(ctx.memory_limit) if ctx.memory_limit else None, }) if not meta.get('ports'): - meta['ports'] = ports + if endpoints: + meta['ports'] = [e.port for e in endpoints] + else: + meta['ports'] = [] metaf.write(json.dumps(meta, indent=4) + '\n') - os.fchmod(f.fileno(), 0o600) - os.fchmod(metaf.fileno(), 0o600) - os.rename(data_dir + '/unit.run.new', - data_dir + '/unit.run') - os.rename(data_dir + '/unit.meta.new', - data_dir + '/unit.meta') - timeout = 30 if daemon_type == 'osd' else None # post-stop command(s) - with open(data_dir + '/unit.poststop.new', 'w') as f: + with write_new(data_dir + '/unit.poststop') as f: # this is a fallback to eventually stop any underlying container that was not stopped properly by unit.stop, # this could happen in very slow setups as described in the issue https://tracker.ceph.com/issues/58242. - add_stop_actions(f, timeout) + add_stop_actions(cast(TextIO, f), timeout) if daemon_type == 'osd': assert osd_fsid poststop = get_ceph_volume_container( @@ -3672,23 +4020,14 @@ def deploy_daemon_units( f.write('! ' + 'rm ' + runtime_dir + '/ceph-%s@%s.%s.service-pid' % (fsid, daemon_type, str(daemon_id) + '.tcmu') + '\n') f.write('! 
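Putting the metadata pieces above together: the unit.meta file written next to unit.run is simply the deployment metadata dict serialized as JSON. With the mgr example used later in this patch it would contain roughly the following; only fields visible in this patch are shown and the values are illustrative:

    import json

    meta = {
        'service_name': 'mgr',        # supplied by the caller via ctx.meta_properties / --meta-json
        'ports': [9283, 8765, 8443],  # filled in from the EndPoint list when not already present
        'memory_request': None,       # int, when --memory-request was passed
        'memory_limit': None,         # int, when --memory-limit was passed
    }
    # roughly what ends up in <data_dir>/<fsid>/mgr.<id>/unit.meta
    print(json.dumps(meta, indent=4))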
' + 'rm ' + runtime_dir + '/ceph-%s@%s.%s.service-cid' % (fsid, daemon_type, str(daemon_id) + '.tcmu') + '\n') f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=False)) + '\n') - os.fchmod(f.fileno(), 0o600) - os.rename(data_dir + '/unit.poststop.new', - data_dir + '/unit.poststop') # post-stop command(s) - with open(data_dir + '/unit.stop.new', 'w') as f: - add_stop_actions(f, timeout) - os.fchmod(f.fileno(), 0o600) - os.rename(data_dir + '/unit.stop.new', - data_dir + '/unit.stop') + with write_new(data_dir + '/unit.stop') as f: + add_stop_actions(cast(TextIO, f), timeout) if c: - with open(data_dir + '/unit.image.new', 'w') as f: + with write_new(data_dir + '/unit.image') as f: f.write(c.image + '\n') - os.fchmod(f.fileno(), 0o600) - os.rename(data_dir + '/unit.image.new', - data_dir + '/unit.image') # sysctl install_sysctl(ctx, fsid, daemon_type) @@ -3697,10 +4036,8 @@ def deploy_daemon_units( install_base_units(ctx, fsid) unit = get_unit_file(ctx, fsid) unit_file = 'ceph-%s@.service' % (fsid) - with open(ctx.unit_dir + '/' + unit_file + '.new', 'w') as f: + with write_new(ctx.unit_dir + '/' + unit_file, perms=None) as f: f.write(unit) - os.rename(ctx.unit_dir + '/' + unit_file + '.new', - ctx.unit_dir + '/' + unit_file) call_throws(ctx, ['systemctl', 'daemon-reload']) unit_name = get_unit_name(fsid, daemon_type, daemon_id) @@ -3852,7 +4189,7 @@ def install_sysctl(ctx: CephadmContext, fsid: str, daemon_type: str) -> None: *lines, '', ] - with open(conf, 'w') as f: + with write_new(conf, owner=None, perms=None) as f: f.write('\n'.join(lines)) conf = Path(ctx.sysctl_dir).joinpath(f'90-ceph-{fsid}-{daemon_type}.conf') @@ -3864,6 +4201,8 @@ def install_sysctl(ctx: CephadmContext, fsid: str, daemon_type: str) -> None: lines = HAproxy.get_sysctl_settings() elif daemon_type == 'keepalived': lines = Keepalived.get_sysctl_settings() + elif daemon_type == CephNvmeof.daemon_type: + lines = CephNvmeof.get_sysctl_settings() lines = filter_sysctl_settings(ctx, lines) # apply the sysctl settings @@ -3950,14 +4289,12 @@ def install_base_units(ctx, fsid): """ # global unit existed = os.path.exists(ctx.unit_dir + '/ceph.target') - with open(ctx.unit_dir + '/ceph.target.new', 'w') as f: + with write_new(ctx.unit_dir + '/ceph.target', perms=None) as f: f.write('[Unit]\n' 'Description=All Ceph clusters and services\n' '\n' '[Install]\n' 'WantedBy=multi-user.target\n') - os.rename(ctx.unit_dir + '/ceph.target.new', - ctx.unit_dir + '/ceph.target') if not existed: # we disable before enable in case a different ceph.target # (from the traditional package) is present; while newer @@ -3970,7 +4307,7 @@ def install_base_units(ctx, fsid): # cluster unit existed = os.path.exists(ctx.unit_dir + '/ceph-%s.target' % fsid) - with open(ctx.unit_dir + '/ceph-%s.target.new' % fsid, 'w') as f: + with write_new(ctx.unit_dir + f'/ceph-{fsid}.target', perms=None) as f: f.write( '[Unit]\n' 'Description=Ceph cluster {fsid}\n' @@ -3981,8 +4318,6 @@ def install_base_units(ctx, fsid): 'WantedBy=multi-user.target ceph.target\n'.format( fsid=fsid) ) - os.rename(ctx.unit_dir + '/ceph-%s.target.new' % fsid, - ctx.unit_dir + '/ceph-%s.target' % fsid) if not existed: call_throws(ctx, ['systemctl', 'enable', 'ceph-%s.target' % fsid]) call_throws(ctx, ['systemctl', 'start', 'ceph-%s.target' % fsid]) @@ -3992,7 +4327,7 @@ def install_base_units(ctx, fsid): return # logrotate for the cluster - with open(ctx.logrotate_dir + '/ceph-%s' % fsid, 'w') as f: + with write_new(ctx.logrotate_dir + f'/ceph-{fsid}', perms=None) as f: 
""" This is a bit sloppy in that the killall/pkill will touch all ceph daemons in all containers, but I don't see an elegant way to send SIGHUP *just* to @@ -4001,6 +4336,18 @@ def install_base_units(ctx, fsid): first child (bash), but that isn't the ceph daemon. This is simpler and should be harmless. """ + targets: List[str] = [ + 'ceph-mon', + 'ceph-mgr', + 'ceph-mds', + 'ceph-osd', + 'ceph-fuse', + 'radosgw', + 'rbd-mirror', + 'cephfs-mirror', + 'tcmu-runner' + ] + f.write("""# created by cephadm /var/log/ceph/%s/*.log { rotate 7 @@ -4008,13 +4355,13 @@ def install_base_units(ctx, fsid): compress sharedscripts postrotate - killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror cephfs-mirror || pkill -1 -x 'ceph-mon|ceph-mgr|ceph-mds|ceph-osd|ceph-fuse|radosgw|rbd-mirror|cephfs-mirror' || true + killall -q -1 %s || pkill -1 -x '%s' || true endscript missingok notifempty su root root } -""" % fsid) +""" % (fsid, ' '.join(targets), '|'.join(targets))) def get_unit_file(ctx, fsid): @@ -4391,9 +4738,8 @@ class MgrListener(Thread): for filename in config: if filename in self.agent.required_files: file_path = os.path.join(self.agent.daemon_dir, filename) - with open(os.open(file_path + '.new', os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with write_new(file_path) as f: f.write(config[filename]) - os.rename(file_path + '.new', file_path) self.agent.pull_conf_settings() self.agent.wakeup() @@ -4454,27 +4800,21 @@ class CephadmAgent(): for filename in config: if filename in self.required_files: file_path = os.path.join(self.daemon_dir, filename) - with open(os.open(file_path + '.new', os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with write_new(file_path) as f: f.write(config[filename]) - os.rename(file_path + '.new', file_path) unit_run_path = os.path.join(self.daemon_dir, 'unit.run') - with open(os.open(unit_run_path + '.new', os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with write_new(unit_run_path) as f: f.write(self.unit_run()) - os.rename(unit_run_path + '.new', unit_run_path) - meta: Dict[str, Any] = {} + meta: Dict[str, Any] = fetch_meta(self.ctx) meta_file_path = os.path.join(self.daemon_dir, 'unit.meta') - if 'meta_json' in self.ctx and self.ctx.meta_json: - meta = json.loads(self.ctx.meta_json) or {} - with open(os.open(meta_file_path + '.new', os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with write_new(meta_file_path) as f: f.write(json.dumps(meta, indent=4) + '\n') - os.rename(meta_file_path + '.new', meta_file_path) unit_file_path = os.path.join(self.ctx.unit_dir, self.unit_name()) - with open(os.open(unit_file_path + '.new', os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f: + with write_new(unit_file_path) as f: f.write(self.unit_file()) - os.rename(unit_file_path + '.new', unit_file_path) call_throws(self.ctx, ['systemctl', 'daemon-reload']) call(self.ctx, ['systemctl', 'stop', self.unit_name()], @@ -4557,7 +4897,7 @@ WantedBy=ceph-{fsid}.target try: for _ in range(1001): - if not port_in_use(self.ctx, self.starting_port): + if not port_in_use(self.ctx, EndPoint('0.0.0.0', self.starting_port)): self.listener_port = str(self.starting_port) break self.starting_port += 1 @@ -4833,15 +5173,24 @@ def command_agent(ctx: CephadmContext) -> None: ################################## - -@infer_image +@executes_early def command_version(ctx): # type: (CephadmContext) -> int - c = CephContainer(ctx, ctx.image, 'ceph', ['--version']) - out, err, ret = call(ctx, c.run_cmd(), desc=c.entrypoint) - if not ret: - print(out.strip()) - return ret + import importlib + + try: 
+ vmod = importlib.import_module('_version') + except ImportError: + print('cephadm version UNKNOWN') + return 1 + _unset = '' + print('cephadm version {0} ({1}) {2} ({3})'.format( + getattr(vmod, 'CEPH_GIT_NICE_VER', _unset), + getattr(vmod, 'CEPH_GIT_VER', _unset), + getattr(vmod, 'CEPH_RELEASE_NAME', _unset), + getattr(vmod, 'CEPH_RELEASE_TYPE', _unset), + )) + return 0 ################################## @@ -5333,7 +5682,7 @@ def create_mon( fsid: str, mon_id: str ) -> None: mon_c = get_container(ctx, fsid, 'mon', mon_id) - ctx.meta_json = json.dumps({'service_name': 'mon'}) + ctx.meta_properties = {'service_name': 'mon'} deploy_daemon(ctx, fsid, 'mon', mon_id, mon_c, uid, gid, config=None, keyring=None) @@ -5380,12 +5729,12 @@ def create_mgr( mgr_keyring = '[mgr.%s]\n\tkey = %s\n' % (mgr_id, mgr_key) mgr_c = get_container(ctx, fsid, 'mgr', mgr_id) # Note:the default port used by the Prometheus node exporter is opened in fw - ctx.meta_json = json.dumps({'service_name': 'mgr'}) - ports = [9283, 8765] + ctx.meta_properties = {'service_name': 'mgr'} + endpoints = [EndPoint('0.0.0.0', 9283), EndPoint('0.0.0.0', 8765)] if not ctx.skip_monitoring_stack: - ports.append(8443) + endpoints.append(EndPoint('0.0.0.0', 8443)) deploy_daemon(ctx, fsid, 'mgr', mgr_id, mgr_c, uid, gid, - config=config, keyring=mgr_keyring, ports=ports) + config=config, keyring=mgr_keyring, endpoints=endpoints) # wait for the service to become available logger.info('Waiting for mgr to start...') @@ -5402,6 +5751,7 @@ def create_mgr( except Exception as e: logger.debug('status failed: %s' % e) return False + is_available(ctx, 'mgr', is_mgr_available) @@ -5428,6 +5778,15 @@ def prepare_ssh( cli(['cephadm', 'set-priv-key', '-i', '/tmp/cephadm-ssh-key'], extra_mounts=mounts) cli(['cephadm', 'set-pub-key', '-i', '/tmp/cephadm-ssh-key.pub'], extra_mounts=mounts) ssh_pub = cli(['cephadm', 'get-pub-key']) + authorize_ssh_key(ssh_pub, ctx.ssh_user) + elif ctx.ssh_private_key and ctx.ssh_signed_cert: + logger.info('Using provided ssh private key and signed cert ...') + mounts = { + pathify(ctx.ssh_private_key.name): '/tmp/cephadm-ssh-key:z', + pathify(ctx.ssh_signed_cert.name): '/tmp/cephadm-ssh-key-cert.pub:z' + } + cli(['cephadm', 'set-priv-key', '-i', '/tmp/cephadm-ssh-key'], extra_mounts=mounts) + cli(['cephadm', 'set-signed-cert', '-i', '/tmp/cephadm-ssh-key-cert.pub'], extra_mounts=mounts) else: logger.info('Generating ssh key...') cli(['cephadm', 'generate-key']) @@ -5435,8 +5794,7 @@ def prepare_ssh( with open(ctx.output_pub_ssh_key, 'w') as f: f.write(ssh_pub) logger.info('Wrote public SSH key to %s' % ctx.output_pub_ssh_key) - - authorize_ssh_key(ssh_pub, ctx.ssh_user) + authorize_ssh_key(ssh_pub, ctx.ssh_user) host = get_hostname() logger.info('Adding host %s...' 
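Back to the reworked command_version() above: `cephadm version` no longer starts a container to ask Ceph for its version; it imports a generated _version module shipped next to the script. The constant names come straight from the getattr() calls; the module contents sketched here are only an assumption about what the build generates:

    # hypothetical generated _version.py
    CEPH_GIT_NICE_VER = '18.2.1'
    CEPH_GIT_VER = 'e30a3a7...'        # abbreviated commit hash, illustrative
    CEPH_RELEASE_NAME = 'reef'
    CEPH_RELEASE_TYPE = 'stable'

    # which makes `cephadm version` print something like:
    #   cephadm version 18.2.1 (e30a3a7...) reef (stable)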
% host) @@ -5649,8 +6007,10 @@ def finish_bootstrap_config( cli(['config', 'set', 'global', 'container_image', f'{ctx.image}']) if mon_network: - logger.info(f'Setting mon public_network to {mon_network}') - cli(['config', 'set', 'mon', 'public_network', mon_network]) + cp = read_config(ctx.config) + cfg_section = 'global' if cp.has_option('global', 'public_network') else 'mon' + logger.info(f'Setting public_network to {mon_network} in {cfg_section} config section') + cli(['config', 'set', cfg_section, 'public_network', mon_network]) if cluster_network: logger.info(f'Setting cluster_network to {cluster_network}') @@ -5787,6 +6147,43 @@ def save_cluster_config(ctx: CephadmContext, uid: int, gid: int, fsid: str) -> N logger.warning(f'Cannot create cluster configuration directory {conf_dir}') +def rollback(func: FuncT) -> FuncT: + """ + """ + @wraps(func) + def _rollback(ctx: CephadmContext) -> Any: + try: + return func(ctx) + except ClusterAlreadyExists: + # another cluster with the provided fsid already exists: don't remove. + raise + except (KeyboardInterrupt, Exception) as e: + logger.error(f'{type(e).__name__}: {e}') + if ctx.cleanup_on_failure: + logger.info('\n\n' + '\t***************\n' + '\tCephadm hit an issue during cluster installation. Current cluster files will be deleted automatically,\n' + '\tto disable this behaviour do not pass the --cleanup-on-failure flag. In case of any previous\n' + '\tbroken installation user must use the following command to completely delete the broken cluster:\n\n' + '\t> cephadm rm-cluster --force --zap-osds --fsid \n\n' + '\tfor more information please refer to https://docs.ceph.com/en/latest/cephadm/operations/#purging-a-cluster\n' + '\t***************\n\n') + _rm_cluster(ctx, keep_logs=False, zap_osds=False) + else: + logger.info('\n\n' + '\t***************\n' + '\tCephadm hit an issue during cluster installation. Current cluster files will NOT BE DELETED automatically to change\n' + '\tthis behaviour you can pass the --cleanup-on-failure. To remove this broken cluster manually please run:\n\n' + f'\t > cephadm rm-cluster --force --fsid {ctx.fsid}\n\n' + '\tin case of any previous broken installation user must use the rm-cluster command to delete the broken cluster:\n\n' + '\t > cephadm rm-cluster --force --zap-osds --fsid \n\n' + '\tfor more information please refer to https://docs.ceph.com/en/latest/cephadm/operations/#purging-a-cluster\n' + '\t***************\n\n') + raise + return cast(FuncT, _rollback) + + +@rollback @default_image def command_bootstrap(ctx): # type: (CephadmContext) -> int @@ -5800,23 +6197,38 @@ def command_bootstrap(ctx): if not ctx.output_pub_ssh_key: ctx.output_pub_ssh_key = os.path.join(ctx.output_dir, CEPH_PUBKEY) - if bool(ctx.ssh_private_key) is not bool(ctx.ssh_public_key): - raise Error('--ssh-private-key and --ssh-public-key must be provided together or not at all.') + if ( + (bool(ctx.ssh_private_key) is not bool(ctx.ssh_public_key)) + and (bool(ctx.ssh_private_key) is not bool(ctx.ssh_signed_cert)) + ): + raise Error('--ssh-private-key must be passed with either --ssh-public-key in the case of standard pubkey ' + 'authentication or with --ssh-signed-cert in the case of CA signed signed keys or not provided at all.') + + if (bool(ctx.ssh_public_key) and bool(ctx.ssh_signed_cert)): + raise Error('--ssh-public-key and --ssh-signed-cert are mututally exclusive. --ssh-public-key is intended ' + 'for standard pubkey encryption where the public key is set as an authorized key on cluster hosts. 
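One note on the rollback() decorator above: it relies on a dedicated ClusterAlreadyExists exception (raised below when an existing fsid, data directory, or output file is detected) so that a bootstrap aborted for that reason never triggers cleanup of the pre-existing cluster. The class itself is defined elsewhere in the patch; judging from the separate `except (Error, ClusterAlreadyExists)` clause added to main() at the end of this hunk, it is presumably a plain Exception subclass rather than a subclass of Error:

    # assumed definition; the real one lives elsewhere in this patch
    class ClusterAlreadyExists(Exception):
        """Raised when bootstrap would clobber an existing cluster's fsid,
        data directory, or output config/keyring/ssh files."""
        pass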
' + '--ssh-signed-cert is intended for the CA signed keys use case where cluster hosts are configured to trust ' + 'a CA pub key and authentication during SSH is done by authenticating the signed cert, requiring no ' + 'public key to be installed on the cluster hosts.') if ctx.fsid: data_dir_base = os.path.join(ctx.data_dir, ctx.fsid) if os.path.exists(data_dir_base): - raise Error(f"A cluster with the same fsid '{ctx.fsid}' already exists.") + raise ClusterAlreadyExists(f"A cluster with the same fsid '{ctx.fsid}' already exists.") else: logger.warning('Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.') + # initial vars + ctx.fsid = ctx.fsid or make_fsid() + fsid = ctx.fsid + if not is_fsid(fsid): + raise Error('not an fsid: %s' % fsid) + # verify output files - for f in [ctx.output_config, ctx.output_keyring, - ctx.output_pub_ssh_key]: + for f in [ctx.output_config, ctx.output_keyring, ctx.output_pub_ssh_key]: if not ctx.allow_overwrite: if os.path.exists(f): - raise Error('%s already exists; delete or pass ' - '--allow-overwrite to overwrite' % f) + raise ClusterAlreadyExists('%s already exists; delete or pass --allow-overwrite to overwrite' % f) dirname = os.path.dirname(f) if dirname and not os.path.exists(dirname): fname = os.path.basename(f) @@ -5837,12 +6249,7 @@ def command_bootstrap(ctx): else: logger.info('Skip prepare_host') - # initial vars - fsid = ctx.fsid or make_fsid() - if not is_fsid(fsid): - raise Error('not an fsid: %s' % fsid) logger.info('Cluster fsid: %s' % fsid) - hostname = get_hostname() if '.' in hostname and not ctx.allow_fqdn_hostname: raise Error('hostname is a fully qualified domain name (%s); either fix (e.g., "sudo hostname %s" or similar) or pass --allow-fqdn-hostname' % (hostname, hostname.split('.')[0])) @@ -5887,9 +6294,7 @@ def command_bootstrap(ctx): (mon_dir, log_dir) = prepare_create_mon(ctx, uid, gid, fsid, mon_id, bootstrap_keyring.name, monmap.name) - with open(mon_dir + '/config', 'w') as f: - os.fchown(f.fileno(), uid, gid) - os.fchmod(f.fileno(), 0o600) + with write_new(mon_dir + '/config', owner=(uid, gid)) as f: f.write(config) make_var_run(ctx, fsid, uid, gid) @@ -5924,8 +6329,7 @@ def command_bootstrap(ctx): cluster_network, ipv6_cluster_network) # output files - with open(ctx.output_keyring, 'w') as f: - os.fchmod(f.fileno(), 0o600) + with write_new(ctx.output_keyring) as f: f.write('[client.admin]\n' '\tkey = ' + admin_key + '\n') logger.info('Wrote keyring to %s' % ctx.output_keyring) @@ -6000,7 +6404,10 @@ def command_bootstrap(ctx): with open(ctx.apply_spec) as f: host_dicts = _extract_host_info_from_applied_spec(f) for h in host_dicts: - _distribute_ssh_keys(ctx, h, hostname) + if ctx.ssh_signed_cert: + logger.info('Key distribution is not supported for signed CA key setups. 
Skipping ...') + else: + _distribute_ssh_keys(ctx, h, hostname) mounts = {} mounts[pathify(ctx.apply_spec)] = '/tmp/spec.yml:ro' @@ -6076,7 +6483,7 @@ def registry_login(ctx: CephadmContext, url: Optional[str], username: Optional[s cmd.append('--authfile=/etc/ceph/podman-auth.json') out, _, _ = call_throws(ctx, cmd) if isinstance(engine, Podman): - os.chmod('/etc/ceph/podman-auth.json', 0o600) + os.chmod('/etc/ceph/podman-auth.json', DEFAULT_MODE) except Exception: raise Error('Failed to login to custom registry @ %s as %s with given password' % (ctx.registry_url, ctx.registry_username)) @@ -6115,10 +6522,10 @@ def get_deployment_container(ctx: CephadmContext, c.container_args.extend(ctx.extra_container_args) if 'extra_entrypoint_args' in ctx and ctx.extra_entrypoint_args: c.args.extend(ctx.extra_entrypoint_args) - if 'config_json' in ctx and ctx.config_json: - conf_files = get_custom_config_files(ctx.config_json) + ccfiles = fetch_custom_config_files(ctx) + if ccfiles: mandatory_keys = ['mount_path', 'content'] - for conf in conf_files['custom_config_files']: + for conf in ccfiles: if all(k in conf for k in mandatory_keys): mount_path = conf['mount_path'] file_path = os.path.join( @@ -6132,50 +6539,118 @@ def get_deployment_container(ctx: CephadmContext, return c +def get_deployment_type(ctx: CephadmContext, daemon_type: str, daemon_id: str) -> DeploymentType: + deployment_type: DeploymentType = DeploymentType.DEFAULT + if ctx.reconfig: + deployment_type = DeploymentType.RECONFIG + unit_name = get_unit_name(ctx.fsid, daemon_type, daemon_id) + (_, state, _) = check_unit(ctx, unit_name) + if state == 'running' or is_container_running(ctx, CephContainer.for_daemon(ctx, ctx.fsid, daemon_type, daemon_id, 'bash')): + # if reconfig was set, that takes priority over redeploy. If + # this is considered a fresh deployment at this stage, + # mark it as a redeploy to avoid port checking + if deployment_type == DeploymentType.DEFAULT: + deployment_type = DeploymentType.REDEPLOY + + logger.info(f'{deployment_type.value} daemon {ctx.name} ...') + + return deployment_type + + @default_image +@deprecated_command def command_deploy(ctx): # type: (CephadmContext) -> None - daemon_type, daemon_id = ctx.name.split('.', 1) + _common_deploy(ctx) - lock = FileLock(ctx, ctx.fsid) - lock.acquire() +def read_configuration_source(ctx: CephadmContext) -> Dict[str, Any]: + """Read a JSON configuration based on the `ctx.source` value.""" + source = '-' + if 'source' in ctx and ctx.source: + source = ctx.source + if source == '-': + config_data = json.load(sys.stdin) + else: + with open(source, 'rb') as fh: + config_data = json.load(fh) + logger.debug('Loaded deploy configuration: %r', config_data) + return config_data + + +def apply_deploy_config_to_ctx( + config_data: Dict[str, Any], + ctx: CephadmContext, +) -> None: + """Bind properties taken from the config_data dictionary to our ctx, + similar to how cli options on `deploy` are bound to the context. + """ + ctx.name = config_data['name'] + image = config_data.get('image', '') + if image: + ctx.image = image + if 'fsid' in config_data: + ctx.fsid = config_data['fsid'] + if 'meta' in config_data: + ctx.meta_properties = config_data['meta'] + if 'config_blobs' in config_data: + ctx.config_blobs = config_data['config_blobs'] + + # many functions don't check that an attribute is set on the ctx + # (with getattr or the '__contains__' func on ctx). + # This reuses the defaults from the CLI options so we don't + # have to repeat things and they can stay in sync. 
+ facade = ArgumentFacade() + _add_deploy_parser_args(facade) + facade.apply(ctx) + for key, value in config_data.get('params', {}).items(): + if key not in facade.defaults: + logger.warning('unexpected parameter: %r=%r', key, value) + setattr(ctx, key, value) + update_default_image(ctx) + logger.debug('Determined image: %r', ctx.image) + + +def command_deploy_from(ctx: CephadmContext) -> None: + """The deploy-from command is similar to deploy but sources nearly all + configuration parameters from an input JSON configuration file. + """ + config_data = read_configuration_source(ctx) + apply_deploy_config_to_ctx(config_data, ctx) + _common_deploy(ctx) + + +def _common_deploy(ctx: CephadmContext) -> None: + daemon_type, daemon_id = ctx.name.split('.', 1) if daemon_type not in get_supported_daemons(): raise Error('daemon type %s not recognized' % daemon_type) - redeploy = False - unit_name = get_unit_name(ctx.fsid, daemon_type, daemon_id) - (_, state, _) = check_unit(ctx, unit_name) - if state == 'running' or is_container_running(ctx, CephContainer.for_daemon(ctx, ctx.fsid, daemon_type, daemon_id, 'bash')): - redeploy = True + lock = FileLock(ctx, ctx.fsid) + lock.acquire() - if ctx.reconfig: - logger.info('%s daemon %s ...' % ('Reconfig', ctx.name)) - elif redeploy: - logger.info('%s daemon %s ...' % ('Redeploy', ctx.name)) - else: - logger.info('%s daemon %s ...' % ('Deploy', ctx.name)) + deployment_type = get_deployment_type(ctx, daemon_type, daemon_id) # Migrate sysctl conf files from /usr/lib to /etc migrate_sysctl_dir(ctx, ctx.fsid) # Get and check ports explicitly required to be opened - daemon_ports = [] # type: List[int] + endpoints = fetch_tcp_ports(ctx) + _dispatch_deploy(ctx, daemon_type, daemon_id, endpoints, deployment_type) - # only check port in use if not reconfig or redeploy since service - # we are redeploying/reconfiguring will already be using the port - if not ctx.reconfig and not redeploy: - if ctx.tcp_ports: - daemon_ports = list(map(int, ctx.tcp_ports.split())) +def _dispatch_deploy( + ctx: CephadmContext, + daemon_type: str, + daemon_id: str, + daemon_endpoints: List[EndPoint], + deployment_type: DeploymentType, +) -> None: if daemon_type in Ceph.daemons: config, keyring = get_config_and_keyring(ctx) uid, gid = extract_uid_gid(ctx) make_var_run(ctx, ctx.fsid, uid, gid) - config_json: Optional[Dict[str, str]] = None - if 'config_json' in ctx and ctx.config_json: - config_json = get_parm(ctx.config_json) + config_json = fetch_configs(ctx) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id, ptrace=ctx.allow_ptrace) @@ -6191,14 +6666,14 @@ def command_deploy(ctx): deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, osd_fsid=ctx.osd_fsid, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type in Monitoring.components: # monitoring daemon - prometheus, grafana, alertmanager, node-exporter # Default Checks # make sure provided config-json is sufficient - config = get_parm(ctx.config_json) # type: ignore + config = fetch_configs(ctx) # type: ignore required_files = Monitoring.components[daemon_type].get('config-json-files', list()) required_args = Monitoring.components[daemon_type].get('config-json-args', list()) if required_files: @@ -6213,12 +6688,14 @@ def command_deploy(ctx): uid, gid = extract_uid_gid_monitoring(ctx, daemon_type) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, 
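To make the new deploy-from flow above concrete: `cephadm _orch deploy` (wired up in the argument parser further down) reads a single JSON document, by default from stdin, and apply_deploy_config_to_ctx() binds its keys onto the context exactly as the CLI options would have. The mgr module is expected to send something of roughly the following shape; the keys come from the code above, every value is made up for illustration:

    deploy_config = {
        'name': 'grafana.host1',                       # required
        'fsid': '00000000-0000-0000-0000-000000000000',
        'image': 'quay.io/example/grafana:latest',     # hypothetical image reference
        'meta': {'service_name': 'grafana'},           # becomes ctx.meta_properties
        'config_blobs': {                              # becomes ctx.config_blobs
            'files': {'grafana.ini': '...'},
        },
        'params': {                                    # same names as the deploy CLI options
            'tcp_ports': '3000',
            'reconfig': False,
        },
    }
    # equivalent to piping json.dumps(deploy_config) into `cephadm _orch deploy`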
daemon_type, daemon_id, c, uid, gid, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type == NFSGanesha.daemon_type: - if not ctx.reconfig and not redeploy and not daemon_ports: - daemon_ports = list(NFSGanesha.port_map.values()) + # only check ports if this is a fresh deployment + if deployment_type == DeploymentType.DEFAULT and not daemon_endpoints: + nfs_ports = list(NFSGanesha.port_map.values()) + daemon_endpoints = [EndPoint('0.0.0.0', p) for p in nfs_ports] config, keyring = get_config_and_keyring(ctx) # TODO: extract ganesha uid/gid (997, 994) ? @@ -6226,8 +6703,8 @@ def command_deploy(ctx): c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type == CephIscsi.daemon_type: config, keyring = get_config_and_keyring(ctx) @@ -6235,55 +6712,68 @@ def command_deploy(ctx): c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) + elif daemon_type == CephNvmeof.daemon_type: + config, keyring = get_config_and_keyring(ctx) + uid, gid = 167, 167 # TODO: need to get properly the uid/gid + c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) + deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, + config=config, keyring=keyring, + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type in Tracing.components: uid, gid = 65534, 65534 c = get_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type == HAproxy.daemon_type: haproxy = HAproxy.init(ctx, ctx.fsid, daemon_id) uid, gid = haproxy.extract_uid_gid_haproxy() c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type == Keepalived.daemon_type: keepalived = Keepalived.init(ctx, ctx.fsid, daemon_id) uid, gid = keepalived.extract_uid_gid_keepalived() c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, - reconfig=ctx.reconfig, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type == CustomContainer.daemon_type: cc = CustomContainer.init(ctx, ctx.fsid, daemon_id) - if not ctx.reconfig and not redeploy: - daemon_ports.extend(cc.ports) + # only check ports if this is a fresh deployment + if deployment_type == DeploymentType.DEFAULT: + daemon_endpoints.extend([EndPoint('0.0.0.0', p) for p in cc.ports]) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id, privileged=cc.privileged, ptrace=ctx.allow_ptrace) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid=cc.uid, gid=cc.gid, config=None, - keyring=None, reconfig=ctx.reconfig, - ports=daemon_ports) + keyring=None, + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif 
daemon_type == CephadmAgent.daemon_type: # get current user gid and uid uid = os.getuid() gid = os.getgid() deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, None, - uid, gid, ports=daemon_ports) + uid, gid, + deployment_type=deployment_type, + endpoints=daemon_endpoints) elif daemon_type == SNMPGateway.daemon_type: sc = SNMPGateway.init(ctx, ctx.fsid, daemon_id) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, sc.uid, sc.gid, - ports=daemon_ports) + deployment_type=deployment_type, + endpoints=daemon_endpoints) else: raise Error('daemon type {} not implemented in command_deploy function' @@ -6398,6 +6888,10 @@ def command_shell(ctx): privileged=True) command = c.shell_cmd(command) + if ctx.dry_run: + print(' '.join(shlex.quote(arg) for arg in command)) + return 0 + return call_timeout(ctx, command, ctx.timeout) ################################## @@ -6774,6 +7268,8 @@ def list_daemons(ctx, detail=True, legacy_dir=None): version = NFSGanesha.get_version(ctx, container_id) if daemon_type == CephIscsi.daemon_type: version = CephIscsi.get_version(ctx, container_id) + if daemon_type == CephNvmeof.daemon_type: + version = CephNvmeof.get_version(ctx, container_id) elif not version: if daemon_type in Ceph.daemons: out, err, code = call(ctx, @@ -6806,7 +7302,8 @@ def list_daemons(ctx, detail=True, legacy_dir=None): 'haproxy', '-v'], verbosity=CallVerbosity.QUIET) if not code and \ - out.startswith('HA-Proxy version '): + out.startswith('HA-Proxy version ') or \ + out.startswith('HAProxy version '): version = out.split(' ')[2] seen_versions[image_id] = version elif daemon_type == 'keepalived': @@ -7208,6 +7705,10 @@ def command_adopt_prometheus(ctx, daemon_id, fsid): # type: (CephadmContext, str, str) -> None daemon_type = 'prometheus' (uid, gid) = extract_uid_gid_monitoring(ctx, daemon_type) + # should try to set the ports we know cephadm defaults + # to for these services in the firewall. + ports = Monitoring.port_map['prometheus'] + endpoints = [EndPoint('0.0.0.0', p) for p in ports] _stop_and_disable(ctx, 'prometheus') @@ -7229,7 +7730,8 @@ def command_adopt_prometheus(ctx, daemon_id, fsid): make_var_run(ctx, fsid, uid, gid) c = get_container(ctx, fsid, daemon_type, daemon_id) - deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid) + deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, + deployment_type=DeploymentType.REDEPLOY, endpoints=endpoints) update_firewalld(ctx, daemon_type) @@ -7238,6 +7740,10 @@ def command_adopt_grafana(ctx, daemon_id, fsid): daemon_type = 'grafana' (uid, gid) = extract_uid_gid_monitoring(ctx, daemon_type) + # should try to set the ports we know cephadm defaults + # to for these services in the firewall. + ports = Monitoring.port_map['grafana'] + endpoints = [EndPoint('0.0.0.0', p) for p in ports] _stop_and_disable(ctx, 'grafana-server') @@ -7283,7 +7789,8 @@ def command_adopt_grafana(ctx, daemon_id, fsid): make_var_run(ctx, fsid, uid, gid) c = get_container(ctx, fsid, daemon_type, daemon_id) - deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid) + deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, + deployment_type=DeploymentType.REDEPLOY, endpoints=endpoints) update_firewalld(ctx, daemon_type) @@ -7292,6 +7799,10 @@ def command_adopt_alertmanager(ctx, daemon_id, fsid): daemon_type = 'alertmanager' (uid, gid) = extract_uid_gid_monitoring(ctx, daemon_type) + # should try to set the ports we know cephadm defaults + # to for these services in the firewall. 
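A small aside on the HAProxy version probe just above: because `and` binds tighter than `or`, the new condition parses as `(not code and out.startswith('HA-Proxy version ')) or out.startswith('HAProxy version ')`, so the exit-code check is effectively bypassed for the newer 'HAProxy version ' banner. If requiring a clean exit for either banner is the intent, the unambiguous form would be:

    if not code and (out.startswith('HA-Proxy version ')
                     or out.startswith('HAProxy version ')):
        version = out.split(' ')[2]
        seen_versions[image_id] = version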
+ ports = Monitoring.port_map['alertmanager'] + endpoints = [EndPoint('0.0.0.0', p) for p in ports] _stop_and_disable(ctx, 'prometheus-alertmanager') @@ -7313,7 +7824,8 @@ def command_adopt_alertmanager(ctx, daemon_id, fsid): make_var_run(ctx, fsid, uid, gid) c = get_container(ctx, fsid, daemon_type, daemon_id) - deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid) + deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, + deployment_type=DeploymentType.REDEPLOY, endpoints=endpoints) update_firewalld(ctx, daemon_type) @@ -7325,7 +7837,7 @@ def _adjust_grafana_ini(filename): try: with open(filename, 'r') as grafana_ini: lines = grafana_ini.readlines() - with open('{}.new'.format(filename), 'w') as grafana_ini: + with write_new(filename, perms=None) as grafana_ini: server_section = False for line in lines: if line.startswith('['): @@ -7338,7 +7850,6 @@ def _adjust_grafana_ini(filename): line = re.sub(r'^cert_key.*', 'cert_key = /etc/grafana/certs/cert_key', line) grafana_ini.write(line) - os.rename('{}.new'.format(filename), filename) except OSError as err: raise Error('Cannot update {}: {}'.format(filename, err)) @@ -7396,8 +7907,9 @@ def command_rm_daemon(ctx): else: call_throws(ctx, ['rm', '-rf', data_dir]) - if 'tcp_ports' in ctx and ctx.tcp_ports is not None: - ports: List[int] = [int(p) for p in ctx.tcp_ports.split()] + endpoints = fetch_tcp_ports(ctx) + ports: List[int] = [e.port for e in endpoints] + if ports: try: fw = Firewalld(ctx) fw.close_ports(ports) @@ -7467,14 +7979,20 @@ def get_ceph_cluster_count(ctx: CephadmContext) -> int: return len([c for c in os.listdir(ctx.data_dir) if is_fsid(c)]) -def command_rm_cluster(ctx): - # type: (CephadmContext) -> None +def command_rm_cluster(ctx: CephadmContext) -> None: if not ctx.force: raise Error('must pass --force to proceed: ' 'this command may destroy precious data!') lock = FileLock(ctx, ctx.fsid) lock.acquire() + _rm_cluster(ctx, ctx.keep_logs, ctx.zap_osds) + + +def _rm_cluster(ctx: CephadmContext, keep_logs: bool, zap_osds: bool) -> None: + + if not ctx.fsid: + raise Error('must select the cluster to delete by passing --fsid to proceed') def disable_systemd_service(unit_name: str) -> None: call(ctx, ['systemctl', 'stop', unit_name], @@ -7484,6 +8002,8 @@ def command_rm_cluster(ctx): call(ctx, ['systemctl', 'disable', unit_name], verbosity=CallVerbosity.DEBUG) + logger.info(f'Deleting cluster with fsid: {ctx.fsid}') + # stop + disable individual daemon units for d in list_daemons(ctx, detail=False): if d['fsid'] != ctx.fsid: @@ -7501,7 +8021,7 @@ def command_rm_cluster(ctx): verbosity=CallVerbosity.DEBUG) # osds? 
- if ctx.zap_osds: + if zap_osds: _zap_osds(ctx) # rm units @@ -7514,7 +8034,7 @@ def command_rm_cluster(ctx): # rm data call_throws(ctx, ['rm', '-rf', ctx.data_dir + '/' + ctx.fsid]) - if not ctx.keep_logs: + if not keep_logs: # rm logs call_throws(ctx, ['rm', '-rf', ctx.log_dir + '/' + ctx.fsid]) call_throws(ctx, ['rm', '-rf', ctx.log_dir @@ -7534,7 +8054,7 @@ def command_rm_cluster(ctx): # rm cephadm logrotate config call_throws(ctx, ['rm', '-f', ctx.logrotate_dir + '/cephadm']) - if not ctx.keep_logs: + if not keep_logs: # remove all cephadm logs for fname in glob(f'{ctx.log_dir}/cephadm.log*'): os.remove(fname) @@ -7547,7 +8067,7 @@ def command_rm_cluster(ctx): p.unlink() # cleanup remaining ceph directories - ceph_dirs = [f'/run/ceph/{ctx.fsid}', f'/tmp/var/lib/ceph/{ctx.fsid}', f'/var/run/ceph/{ctx.fsid}'] + ceph_dirs = [f'/run/ceph/{ctx.fsid}', f'/tmp/cephadm-{ctx.fsid}', f'/var/run/ceph/{ctx.fsid}'] for dd in ceph_dirs: shutil.rmtree(dd, ignore_errors=True) @@ -7671,7 +8191,7 @@ def authorize_ssh_key(ssh_pub_key: str, ssh_user: str) -> bool: with open(auth_keys_file, 'a') as f: os.fchown(f.fileno(), ssh_uid, ssh_gid) # just in case we created it - os.fchmod(f.fileno(), 0o600) # just in case we created it + os.fchmod(f.fileno(), DEFAULT_MODE) # just in case we created it if add_newline: f.write('\n') f.write(ssh_pub_key + '\n') @@ -7690,7 +8210,7 @@ def revoke_ssh_key(key: str, ssh_user: str) -> None: _, filename = tempfile.mkstemp() with open(filename, 'w') as f: os.fchown(f.fileno(), ssh_uid, ssh_gid) - os.fchmod(f.fileno(), 0o600) # secure access to the keys file + os.fchmod(f.fileno(), DEFAULT_MODE) # secure access to the keys file for line in lines: if line.strip() == key.strip(): deleted = True @@ -7715,11 +8235,17 @@ def check_ssh_connectivity(ctx: CephadmContext) -> None: logger.warning('Cannot check ssh connectivity. Skipping...') return - logger.info('Verifying ssh connectivity ...') + ssh_priv_key_path = '' + ssh_pub_key_path = '' + ssh_signed_cert_path = '' if ctx.ssh_private_key and ctx.ssh_public_key: # let's use the keys provided by the user ssh_priv_key_path = pathify(ctx.ssh_private_key.name) ssh_pub_key_path = pathify(ctx.ssh_public_key.name) + elif ctx.ssh_private_key and ctx.ssh_signed_cert: + # CA signed keys use case + ssh_priv_key_path = pathify(ctx.ssh_private_key.name) + ssh_signed_cert_path = pathify(ctx.ssh_signed_cert.name) else: # no custom keys, let's generate some random keys just for this check ssh_priv_key_path = f'/tmp/ssh_key_{uuid.uuid1()}' @@ -7730,31 +8256,35 @@ def check_ssh_connectivity(ctx: CephadmContext) -> None: logger.warning('Cannot generate keys to check ssh connectivity.') return - with open(ssh_pub_key_path, 'r') as f: - key = f.read().strip() - new_key = authorize_ssh_key(key, ctx.ssh_user) - ssh_cfg_file_arg = ['-F', pathify(ctx.ssh_config.name)] if ctx.ssh_config else [] - _, _, code = call(ctx, ['ssh', '-o StrictHostKeyChecking=no', - *ssh_cfg_file_arg, '-i', ssh_priv_key_path, - '-o PasswordAuthentication=no', - f'{ctx.ssh_user}@{get_hostname()}', - 'sudo echo']) - - # we only remove the key if it's a new one. 
In case the user has provided - # some already existing key then we don't alter authorized_keys file - if new_key: - revoke_ssh_key(key, ctx.ssh_user) - - pub_key_msg = '- The public key file configured by --ssh-public-key is valid\n' if ctx.ssh_public_key else '' - prv_key_msg = '- The private key file configured by --ssh-private-key is valid\n' if ctx.ssh_private_key else '' - ssh_cfg_msg = '- The ssh configuration file configured by --ssh-config is valid\n' if ctx.ssh_config else '' - err_msg = f""" + if ssh_signed_cert_path: + logger.info('Verification for CA signed keys authentication not implemented. Skipping ...') + elif ssh_pub_key_path: + logger.info('Verifying ssh connectivity using standard pubkey authentication ...') + with open(ssh_pub_key_path, 'r') as f: + key = f.read().strip() + new_key = authorize_ssh_key(key, ctx.ssh_user) + ssh_cfg_file_arg = ['-F', pathify(ctx.ssh_config.name)] if ctx.ssh_config else [] + _, _, code = call(ctx, ['ssh', '-o StrictHostKeyChecking=no', + *ssh_cfg_file_arg, '-i', ssh_priv_key_path, + '-o PasswordAuthentication=no', + f'{ctx.ssh_user}@{get_hostname()}', + 'sudo echo']) + + # we only remove the key if it's a new one. In case the user has provided + # some already existing key then we don't alter authorized_keys file + if new_key: + revoke_ssh_key(key, ctx.ssh_user) + + pub_key_msg = '- The public key file configured by --ssh-public-key is valid\n' if ctx.ssh_public_key else '' + prv_key_msg = '- The private key file configured by --ssh-private-key is valid\n' if ctx.ssh_private_key else '' + ssh_cfg_msg = '- The ssh configuration file configured by --ssh-config is valid\n' if ctx.ssh_config else '' + err_msg = f""" ** Please verify your user's ssh configuration and make sure: - User {ctx.ssh_user} must have passwordless sudo access {pub_key_msg}{prv_key_msg}{ssh_cfg_msg} """ - if code != 0: - raise Error(err_msg) + if code != 0: + raise Error(err_msg) def command_prepare_host(ctx: CephadmContext) -> None: @@ -9321,6 +9851,93 @@ def command_maintenance(ctx: CephadmContext) -> str: ################################## +class ArgumentFacade: + def __init__(self) -> None: + self.defaults: Dict[str, Any] = {} + + def add_argument(self, *args: Any, **kwargs: Any) -> None: + if not args: + raise ValueError('expected at least one argument') + name = args[0] + if not name.startswith('--'): + raise ValueError(f'expected long option, got: {name!r}') + name = name[2:].replace('-', '_') + value = kwargs.pop('default', None) + self.defaults[name] = value + + def apply(self, ctx: CephadmContext) -> None: + for key, value in self.defaults.items(): + setattr(ctx, key, value) + + +def _add_deploy_parser_args( + parser_deploy: Union[argparse.ArgumentParser, ArgumentFacade], +) -> None: + parser_deploy.add_argument( + '--config', '-c', + help='config file for new daemon') + parser_deploy.add_argument( + '--config-json', + help='Additional configuration information in JSON format') + parser_deploy.add_argument( + '--keyring', + help='keyring for new daemon') + parser_deploy.add_argument( + '--key', + help='key for new daemon') + parser_deploy.add_argument( + '--osd-fsid', + help='OSD uuid, if creating an OSD container') + parser_deploy.add_argument( + '--skip-firewalld', + action='store_true', + help='Do not configure firewalld') + parser_deploy.add_argument( + '--tcp-ports', + help='List of tcp ports to open in the host firewall') + parser_deploy.add_argument( + '--port-ips', + help='JSON dict mapping ports to IPs they need to be bound on' + ) + 
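How the ArgumentFacade above is meant to be used: it accepts the same add_argument() calls as the real argparse parser but only records each long option's default, so apply_deploy_config_to_ctx() can seed the context with the same defaults the CLI would have provided. A small usage sketch, where _Ctx is again just a bag of attributes standing in for CephadmContext:

    class _Ctx:
        pass

    ctx = _Ctx()
    facade = ArgumentFacade()
    _add_deploy_parser_args(facade)   # registers the same options as the deploy parser
    facade.apply(ctx)                 # each long option becomes a default attribute

    ctx.tcp_ports          # None  (no default= given for --tcp-ports)
    ctx.port_ips           # None
    ctx.skip_firewalld     # None  (store_true flags without default= also map to None)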
parser_deploy.add_argument( + '--reconfig', + action='store_true', + help='Reconfigure a previously deployed daemon') + parser_deploy.add_argument( + '--allow-ptrace', + action='store_true', + help='Allow SYS_PTRACE on daemon container') + parser_deploy.add_argument( + '--container-init', + action='store_true', + default=CONTAINER_INIT, + help=argparse.SUPPRESS) + parser_deploy.add_argument( + '--memory-request', + help='Container memory request/target' + ) + parser_deploy.add_argument( + '--memory-limit', + help='Container memory hard limit' + ) + parser_deploy.add_argument( + '--meta-json', + help='JSON dict of additional metadata' + ) + parser_deploy.add_argument( + '--extra-container-args', + action='append', + default=[], + help='Additional container arguments to apply to daemon' + ) + parser_deploy.add_argument( + '--extra-entrypoint-args', + action='append', + default=[], + help='Additional entrypoint arguments to apply to deamon' + ) + + def _get_parser(): # type: () -> argparse.ArgumentParser parser = argparse.ArgumentParser( @@ -9387,7 +10004,7 @@ def _get_parser(): subparsers = parser.add_subparsers(help='sub-command') parser_version = subparsers.add_parser( - 'version', help='get ceph version from container') + 'version', help='get cephadm version') parser_version.set_defaults(func=command_version) parser_pull = subparsers.add_parser( @@ -9558,6 +10175,10 @@ def _get_parser(): '--no-hosts', action='store_true', help='dont pass /etc/hosts through to the container') + parser_shell.add_argument( + '--dry-run', + action='store_true', + help='print, but do not execute, the container command to start the shell') parser_enter = subparsers.add_parser( 'enter', help='run an interactive shell inside a running daemon container') @@ -9714,6 +10335,10 @@ def _get_parser(): '--ssh-public-key', type=argparse.FileType('r'), help='SSH public key') + parser_bootstrap.add_argument( + '--ssh-signed-cert', + type=argparse.FileType('r'), + help='Signed cert for setups using CA signed SSH keys') parser_bootstrap.add_argument( '--ssh-user', default='root', @@ -9750,6 +10375,11 @@ def _get_parser(): '--allow-overwrite', action='store_true', help='allow overwrite of existing --output-* config/keyring/ssh files') + parser_bootstrap.add_argument( + '--cleanup-on-failure', + action='store_true', + default=False, + help='Delete cluster files in case of a failed installation') parser_bootstrap.add_argument( '--allow-fqdn-hostname', action='store_true', @@ -9823,64 +10453,29 @@ def _get_parser(): '--fsid', required=True, help='cluster FSID') - parser_deploy.add_argument( - '--config', '-c', - help='config file for new daemon') - parser_deploy.add_argument( - '--config-json', - help='Additional configuration information in JSON format') - parser_deploy.add_argument( - '--keyring', - help='keyring for new daemon') - parser_deploy.add_argument( - '--key', - help='key for new daemon') - parser_deploy.add_argument( - '--osd-fsid', - help='OSD uuid, if creating an OSD container') - parser_deploy.add_argument( - '--skip-firewalld', - action='store_true', - help='Do not configure firewalld') - parser_deploy.add_argument( - '--tcp-ports', - help='List of tcp ports to open in the host firewall') - parser_deploy.add_argument( - '--reconfig', - action='store_true', - help='Reconfigure a previously deployed daemon') - parser_deploy.add_argument( - '--allow-ptrace', - action='store_true', - help='Allow SYS_PTRACE on daemon container') - parser_deploy.add_argument( - '--container-init', - action='store_true', - 
default=CONTAINER_INIT, - help=argparse.SUPPRESS) - parser_deploy.add_argument( - '--memory-request', - help='Container memory request/target' - ) - parser_deploy.add_argument( - '--memory-limit', - help='Container memory hard limit' - ) - parser_deploy.add_argument( - '--meta-json', - help='JSON dict of additional metadata' + _add_deploy_parser_args(parser_deploy) + + parser_orch = subparsers.add_parser( + '_orch', ) - parser_deploy.add_argument( - '--extra-container-args', - action='append', - default=[], - help='Additional container arguments to apply to daemon' + subparsers_orch = parser_orch.add_subparsers( + title='Orchestrator Driven Commands', + description='Commands that are typically only run by cephadm mgr module', ) - parser_deploy.add_argument( - '--extra-entrypoint-args', - action='append', - default=[], - help='Additional entrypoint arguments to apply to deamon' + + parser_deploy_from = subparsers_orch.add_parser( + 'deploy', help='deploy a daemon') + parser_deploy_from.set_defaults(func=command_deploy_from) + # currently cephadm mgr module passes an fsid option on the CLI too + # TODO: remove this and always source fsid from the JSON? + parser_deploy_from.add_argument( + '--fsid', + help='cluster FSID') + parser_deploy_from.add_argument( + 'source', + default='-', + nargs='?', + help='Configuration input source file', ) parser_check_host = subparsers.add_parser( @@ -10066,6 +10661,15 @@ def main() -> None: sys.stderr.write('No command specified; pass -h or --help for usage\n') sys.exit(1) + if ctx.has_function() and getattr(ctx.func, '_execute_early', False): + try: + sys.exit(ctx.func(ctx)) + except Error as e: + if ctx.verbose: + raise + logger.error('ERROR: %s' % e) + sys.exit(1) + cephadm_require_root() cephadm_init_logging(ctx, av) try: @@ -10082,7 +10686,7 @@ def main() -> None: check_container_engine(ctx) # command handler r = ctx.func(ctx) - except Error as e: + except (Error, ClusterAlreadyExists) as e: if ctx.verbose: raise logger.error('ERROR: %s' % e) diff --git a/ceph/src/cephadm/tests/fixtures.py b/ceph/src/cephadm/tests/fixtures.py index b3926f9a9..76ac0b44c 100644 --- a/ceph/src/cephadm/tests/fixtures.py +++ b/ceph/src/cephadm/tests/fixtures.py @@ -1,4 +1,3 @@ -from importlib.resources import contents import mock import os import pytest @@ -71,8 +70,17 @@ def cephadm_fs( uid = os.getuid() gid = os.getgid() + def fchown(fd, _uid, _gid): + """pyfakefs doesn't provide a working fchown or fchmod. + In order to get permissions working generally across renames + we need to provide our own implemenation. 
+ """ + file_obj = fs.get_open_file(fd).get_object() + file_obj.st_uid = _uid + file_obj.st_gid = _gid + _cephadm = import_cephadm() - with mock.patch('os.fchown'), \ + with mock.patch('os.fchown', side_effect=fchown), \ mock.patch('os.fchmod'), \ mock.patch('platform.processor', return_value='x86_64'), \ mock.patch('cephadm.extract_uid_gid', return_value=(uid, gid)): diff --git a/ceph/src/cephadm/tests/test_agent.py b/ceph/src/cephadm/tests/test_agent.py index f57972940..f9cf201e2 100644 --- a/ceph/src/cephadm/tests/test_agent.py +++ b/ceph/src/cephadm/tests/test_agent.py @@ -433,8 +433,8 @@ def test_agent_run(_pull_conf_settings, _port_in_use, _gatherer_start, host = AGENT_ID device_enhanced_scan = False - def _fake_port_in_use(ctx, port): - if port == open_listener_port: + def _fake_port_in_use(ctx, endpoint): + if endpoint.port == open_listener_port: return False return True diff --git a/ceph/src/cephadm/tests/test_cephadm.py b/ceph/src/cephadm/tests/test_cephadm.py index ff6a5c9d4..d310215f6 100644 --- a/ceph/src/cephadm/tests/test_cephadm.py +++ b/ceph/src/cephadm/tests/test_cephadm.py @@ -58,8 +58,8 @@ class TestCephAdm(object): for side_effect, expected_exception in ( (os_error(errno.EADDRINUSE), _cephadm.PortOccupiedError), - (os_error(errno.EAFNOSUPPORT), _cephadm.Error), - (os_error(errno.EADDRNOTAVAIL), _cephadm.Error), + (os_error(errno.EAFNOSUPPORT), OSError), + (os_error(errno.EADDRNOTAVAIL), OSError), (None, None), ): _socket = mock.Mock() @@ -77,20 +77,68 @@ class TestCephAdm(object): def test_port_in_use(self, _logger, _attempt_bind): empty_ctx = None - assert _cephadm.port_in_use(empty_ctx, 9100) == False + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == False _attempt_bind.side_effect = _cephadm.PortOccupiedError('msg') - assert _cephadm.port_in_use(empty_ctx, 9100) == True + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == True os_error = OSError() os_error.errno = errno.EADDRNOTAVAIL _attempt_bind.side_effect = os_error - assert _cephadm.port_in_use(empty_ctx, 9100) == False + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == False os_error = OSError() os_error.errno = errno.EAFNOSUPPORT _attempt_bind.side_effect = os_error - assert _cephadm.port_in_use(empty_ctx, 9100) == False + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('0.0.0.0', 9100)) == False + + @mock.patch('cephadm.socket.socket.bind') + @mock.patch('cephadm.logger') + def test_port_in_use_special_cases(self, _logger, _bind): + # port_in_use has special handling for + # EAFNOSUPPORT and EADDRNOTAVAIL errno OSErrors. 
+ # If we get those specific errors when attempting + # to bind to the ip:port we should not say the + # port is in use + + def os_error(errno): + _os_error = OSError() + _os_error.errno = errno + return _os_error + + _bind.side_effect = os_error(errno.EADDRNOTAVAIL) + in_use = _cephadm.port_in_use(None, _cephadm.EndPoint('1.2.3.4', 10000)) + assert in_use == False + + _bind.side_effect = os_error(errno.EAFNOSUPPORT) + in_use = _cephadm.port_in_use(None, _cephadm.EndPoint('1.2.3.4', 10000)) + assert in_use == False + + # this time, have it raise the actual port taken error + # so it should report the port is in use + _bind.side_effect = os_error(errno.EADDRINUSE) + in_use = _cephadm.port_in_use(None, _cephadm.EndPoint('1.2.3.4', 10000)) + assert in_use == True + + @mock.patch('cephadm.attempt_bind') + @mock.patch('cephadm.logger') + def test_port_in_use_with_specific_ips(self, _logger, _attempt_bind): + empty_ctx = None + + def _fake_attempt_bind(ctx, s: socket.socket, addr: str, port: int) -> None: + occupied_error = _cephadm.PortOccupiedError('msg') + if addr.startswith('200'): + raise occupied_error + if addr.startswith('100'): + if port == 4567: + raise occupied_error + + _attempt_bind.side_effect = _fake_attempt_bind + + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('200.0.0.0', 9100)) == True + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('100.0.0.0', 9100)) == False + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('100.0.0.0', 4567)) == True + assert _cephadm.port_in_use(empty_ctx, _cephadm.EndPoint('155.0.0.0', 4567)) == False @mock.patch('socket.socket') @mock.patch('cephadm.logger') @@ -126,8 +174,8 @@ class TestCephAdm(object): ): for side_effect, expected_exception in ( (os_error(errno.EADDRINUSE), _cephadm.PortOccupiedError), - (os_error(errno.EADDRNOTAVAIL), _cephadm.Error), - (os_error(errno.EAFNOSUPPORT), _cephadm.Error), + (os_error(errno.EADDRNOTAVAIL), OSError), + (os_error(errno.EAFNOSUPPORT), OSError), (None, None), ): mock_socket_obj = mock.Mock() @@ -258,7 +306,7 @@ class TestCephAdm(object): _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None) @mock.patch('cephadm.logger') - @mock.patch('cephadm.get_custom_config_files') + @mock.patch('cephadm.fetch_custom_config_files') @mock.patch('cephadm.get_container') def test_get_deployment_container(self, _get_container, _get_config, _logger): """ @@ -272,12 +320,12 @@ class TestCephAdm(object): '--something', ] ctx.data_dir = 'data' - _get_config.return_value = {'custom_config_files': [ + _get_config.return_value = [ { 'mount_path': '/etc/testing.str', 'content': 'this\nis\na\nstring', } - ]} + ] _get_container.return_value = _cephadm.CephContainer.for_daemon( ctx, fsid='9b9d7609-f4d5-4aba-94c8-effa764d96c9', @@ -306,13 +354,16 @@ class TestCephAdm(object): @mock.patch('cephadm.logger') @mock.patch('cephadm.FileLock') @mock.patch('cephadm.deploy_daemon') - @mock.patch('cephadm.get_parm') + @mock.patch('cephadm.fetch_configs') @mock.patch('cephadm.make_var_run') @mock.patch('cephadm.migrate_sysctl_dir') @mock.patch('cephadm.check_unit', lambda *args, **kwargs: (None, 'running', None)) @mock.patch('cephadm.get_unit_name', lambda *args, **kwargs: 'mon-unit-name') @mock.patch('cephadm.get_deployment_container') - def test_mon_crush_location(self, _get_deployment_container, _migrate_sysctl, _make_var_run, _get_parm, _deploy_daemon, _file_lock, _logger): + @mock.patch('cephadm.read_configuration_source', lambda c: {}) + 
@mock.patch('cephadm.apply_deploy_config_to_ctx', lambda d, c: None) + @mock.patch('cephadm.extract_uid_gid', lambda *args, **kwargs: ('ceph', 'ceph')) + def test_mon_crush_location(self, _get_deployment_container, _migrate_sysctl, _make_var_run, _fetch_configs, _deploy_daemon, _file_lock, _logger): """ test that crush location for mon is set if it is included in config_json """ @@ -325,7 +376,8 @@ class TestCephAdm(object): ctx.allow_ptrace = True ctx.config_json = '-' ctx.osd_fsid = '0' - _get_parm.return_value = { + ctx.tcp_ports = '3300 6789' + _fetch_configs.return_value = { 'crush_location': 'database=a' } @@ -352,10 +404,10 @@ class TestCephAdm(object): _deploy_daemon.side_effect = _crush_location_checker with pytest.raises(Exception, match='--set-crush-location database=a'): - _cephadm.command_deploy(ctx) + _cephadm.command_deploy_from(ctx) @mock.patch('cephadm.logger') - @mock.patch('cephadm.get_custom_config_files') + @mock.patch('cephadm.fetch_custom_config_files') def test_write_custom_conf_files(self, _get_config, _logger, cephadm_fs): """ test _write_custom_conf_files writes the conf files correctly @@ -364,7 +416,7 @@ class TestCephAdm(object): ctx = _cephadm.CephadmContext() ctx.config_json = '-' ctx.data_dir = _cephadm.DATA_DIR - _get_config.return_value = {'custom_config_files': [ + _get_config.return_value = [ { 'mount_path': '/etc/testing.str', 'content': 'this\nis\na\nstring', @@ -376,7 +428,7 @@ class TestCephAdm(object): { 'mount_path': '/etc/no-content.conf', }, - ]} + ] _cephadm._write_custom_conf_files(ctx, 'mon', 'host1', 'fsid', 0, 0) with open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.str'), 'r') as f: assert 'this\nis\na\nstring' == f.read() @@ -1769,11 +1821,11 @@ if ! grep -qs /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id # iscsi tcmu-runner container ! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id-tcmu 2> /dev/null ! 
/usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu 2> /dev/null -/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/tcmu-runner --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph & +/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/local/scripts/tcmu-runner-entrypoint.sh --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/tcmu-runner-entrypoint.sh:/usr/local/scripts/tcmu-runner-entrypoint.sh -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph & # iscsi.daemon_id ! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id 2> /dev/null ! 
/usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id 2> /dev/null -/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/rbd-target-api --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph +/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/rbd-target-api --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -e CEPH_USE_RANDOM_NONCE=1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/tcmu-runner-entrypoint.sh:/usr/local/scripts/tcmu-runner-entrypoint.sh -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph """ def test_get_container(self): diff --git a/ceph/src/cephadm/tests/test_util_funcs.py b/ceph/src/cephadm/tests/test_util_funcs.py index 2c20dfb91..270753a55 100644 --- a/ceph/src/cephadm/tests/test_util_funcs.py +++ b/ceph/src/cephadm/tests/test_util_funcs.py @@ -94,7 +94,7 @@ class TestCopyTree: self._copy_tree([src1], dst, uid=0, gid=0) assert len(_chown.mock_calls) >= 2 for c in _chown.mock_calls: - assert c.args[1:] == (0, 0) + assert c == mock.call(mock.ANY, 0, 0) assert (dst / "foo.txt").exists() @@ -187,7 +187,7 @@ class TestCopyFiles: self._copy_files([file1], dst, uid=0, gid=0) assert len(_chown.mock_calls) >= 1 for c in _chown.mock_calls: - assert c.args[1:] == (0, 0) + assert c == mock.call(mock.ANY, 0, 0) assert (dst / "f1.txt").exists() @@ -270,7 +270,7 @@ class TestMoveFiles: self._move_files([file1], dst, uid=0, gid=0) assert len(_chown.mock_calls) >= 1 for c in _chown.mock_calls: - assert c.args[1:] == (0, 0) + assert c == mock.call(mock.ANY, 0, 0) assert dst.is_file() assert not file1.exists() @@ -288,9 +288,9 @@ def test_recursive_chown(tmp_path): _chown.return_value = None _cephadm.recursive_chown(str(d1), uid=500, gid=500) assert len(_chown.mock_calls) == 3 - assert _chown.mock_calls[0].args == (str(d1), 500, 500) - assert _chown.mock_calls[1].args == (str(d2), 500, 500) - assert _chown.mock_calls[2].args == (str(f1), 500, 500) + assert _chown.mock_calls[0] == mock.call(str(d1), 500, 500) + assert _chown.mock_calls[1] == mock.call(str(d2), 500, 500) + assert _chown.mock_calls[2] == 
mock.call(str(f1), 500, 500) class TestFindExecutable: @@ -663,3 +663,146 @@ def test_call(caplog, monkeypatch, pyline, expected, call_kwargs, log_check): assert result == expected if callable(log_check): log_check(caplog) + + +class TestWriteNew: + def test_success(self, tmp_path): + "Test the simple basic feature of writing a file." + dest = tmp_path / "foo.txt" + with _cephadm.write_new(dest) as fh: + fh.write("something\n") + fh.write("something else\n") + + with open(dest, "r") as fh: + assert fh.read() == "something\nsomething else\n" + + def test_write_ower_mode(self, tmp_path): + "Test that the owner and perms options function." + dest = tmp_path / "foo.txt" + + # if this is test run as non-root, we can't really change ownership + uid = os.getuid() + gid = os.getgid() + + with _cephadm.write_new(dest, owner=(uid, gid), perms=0o600) as fh: + fh.write("xomething\n") + fh.write("xomething else\n") + + with open(dest, "r") as fh: + assert fh.read() == "xomething\nxomething else\n" + sr = os.fstat(fh.fileno()) + assert sr.st_uid == uid + assert sr.st_gid == gid + assert (sr.st_mode & 0o777) == 0o600 + + def test_encoding(self, tmp_path): + "Test that the encoding option functions." + dest = tmp_path / "foo.txt" + msg = "\u2603\u26C5\n" + with _cephadm.write_new(dest, encoding='utf-8') as fh: + fh.write(msg) + with open(dest, "rb") as fh: + b1 = fh.read() + assert b1.decode('utf-8') == msg + + dest = tmp_path / "foo2.txt" + with _cephadm.write_new(dest, encoding='utf-16le') as fh: + fh.write(msg) + with open(dest, "rb") as fh: + b2 = fh.read() + assert b2.decode('utf-16le') == msg + + # the binary data should differ due to the different encodings + assert b1 != b2 + + def test_cleanup(self, tmp_path): + "Test that an exception during write leaves no file behind." 
+ dest = tmp_path / "foo.txt" + with pytest.raises(ValueError): + with _cephadm.write_new(dest) as fh: + fh.write("hello\n") + raise ValueError("foo") + fh.write("world\n") + assert not dest.exists() + assert not dest.with_name(dest.name+".new").exists() + assert list(dest.parent.iterdir()) == [] + + +class CompareContext1: + cfg_data = { + "name": "mane", + "fsid": "foobar", + "image": "fake.io/noway/nohow:gndn", + "meta": { + "fruit": "banana", + "vegetable": "carrot", + }, + "params": { + "osd_fsid": "robble", + "tcp_ports": [404, 9999], + }, + "config_blobs": { + "alpha": {"sloop": "John B"}, + "beta": {"forest": "birch"}, + "gamma": {"forest": "pine"}, + }, + } + + def check(self, ctx): + assert ctx.name == 'mane' + assert ctx.fsid == 'foobar' + assert ctx.image == 'fake.io/noway/nohow:gndn' + assert ctx.meta_properties == {"fruit": "banana", "vegetable": "carrot"} + assert ctx.config_blobs == { + "alpha": {"sloop": "John B"}, + "beta": {"forest": "birch"}, + "gamma": {"forest": "pine"}, + } + assert ctx.osd_fsid == "robble" + assert ctx.tcp_ports == [404, 9999] + + +class CompareContext2: + cfg_data = { + "name": "cc2", + "fsid": "foobar", + "meta": { + "fruit": "banana", + "vegetable": "carrot", + }, + "params": {}, + "config_blobs": { + "alpha": {"sloop": "John B"}, + "beta": {"forest": "birch"}, + "gamma": {"forest": "pine"}, + }, + } + + def check(self, ctx): + assert ctx.name == 'cc2' + assert ctx.fsid == 'foobar' + assert ctx.image == 'quay.io/ceph/ceph:v18' + assert ctx.meta_properties == {"fruit": "banana", "vegetable": "carrot"} + assert ctx.config_blobs == { + "alpha": {"sloop": "John B"}, + "beta": {"forest": "birch"}, + "gamma": {"forest": "pine"}, + } + assert ctx.osd_fsid is None + assert ctx.tcp_ports is None + + +@pytest.mark.parametrize( + "cc", + [ + CompareContext1(), + CompareContext2(), + ], +) +def test_apply_deploy_config_to_ctx(cc, monkeypatch): + import logging + + monkeypatch.setattr("cephadm.logger", logging.getLogger()) + ctx = FakeContext() + _cephadm.apply_deploy_config_to_ctx(cc.cfg_data, ctx) + cc.check(ctx) diff --git a/ceph/src/client/Client.cc b/ceph/src/client/Client.cc index 7c3f117a8..2b7db5a89 100644 --- a/ceph/src/client/Client.cc +++ b/ceph/src/client/Client.cc @@ -72,6 +72,7 @@ #include "mds/flock.h" #include "mds/cephfs_features.h" +#include "mds/snap.h" #include "osd/OSDMap.h" #include "osdc/Filer.h" @@ -1209,6 +1210,11 @@ Dentry *Client::insert_dentry_inode(Dir *dir, const string& dname, LeaseStat *dl Inode *diri = dir->parent_inode; clear_dir_complete_and_ordered(diri, false); dn = link(dir, dname, in, dn); + + if (old_dentry) { + dn->is_renaming = false; + signal_cond_list(waiting_for_rename); + } } update_dentry_lease(dn, dlease, from, session); @@ -1292,7 +1298,8 @@ void Client::clear_dir_complete_and_ordered(Inode *diri, bool complete) /* * insert results from readdir or lssnap into the metadata cache. 
*/ -void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, Inode *diri) { +void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, + Inode *diri, Inode *diri_other) { auto& reply = request->reply; ConnectionRef con = request->reply->get_connection(); @@ -1307,7 +1314,8 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, dir_result_t *dirp = request->dirp; ceph_assert(dirp); - // the extra buffer list is only set for readdir and lssnap replies + // the extra buffer list is only set for readdir, lssnap and + // readdir_snapdiff replies auto p = reply->get_extra_bl().cbegin(); if (!p.end()) { // snapdir? @@ -1315,10 +1323,27 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, ceph_assert(diri); diri = open_snapdir(diri); } + bool snapdiff_req = request->head.op == CEPH_MDS_OP_READDIR_SNAPDIFF; + frag_t fg; + unsigned offset_hash; + if (snapdiff_req) { + fg = (unsigned)request->head.args.snapdiff.frag; + offset_hash = (unsigned)request->head.args.snapdiff.offset_hash; + } else { + fg = (unsigned)request->head.args.readdir.frag; + offset_hash = (unsigned)request->head.args.readdir.offset_hash; + } // only open dir if we're actually adding stuff to it! Dir *dir = diri->open_dir(); ceph_assert(dir); + //open opponent dir for snapdiff if any + Dir *dir_other = nullptr; + if (snapdiff_req) { + ceph_assert(diri_other); + dir_other = diri_other->open_dir(); + ceph_assert(dir_other); + } // dirstat DirStat dst(p, features); @@ -1330,7 +1355,6 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, bool end = ((unsigned)flags & CEPH_READDIR_FRAG_END); bool hash_order = ((unsigned)flags & CEPH_READDIR_HASH_ORDER); - frag_t fg = (unsigned)request->head.args.readdir.frag; unsigned readdir_offset = dirp->next_offset; string readdir_start = dirp->last_name; ceph_assert(!readdir_start.empty() || readdir_offset == 2); @@ -1341,7 +1365,7 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, last_hash = ceph_frag_value(diri->hash_dentry_name(readdir_start)); } else if (flags & CEPH_READDIR_OFFSET_HASH) { /* mds understands offset_hash */ - last_hash = (unsigned)request->head.args.readdir.offset_hash; + last_hash = offset_hash; } } @@ -1386,13 +1410,22 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, Inode *in = add_update_inode(&ist, request->sent_stamp, session, request->perms); + auto *effective_dir = dir; + auto *effective_diri = diri; + + if (snapdiff_req && in->snapid != diri->snapid) { + ceph_assert(diri_other); + ceph_assert(dir_other); + effective_diri = diri_other; + effective_dir = dir_other; + } Dentry *dn; - if (diri->dir->dentries.count(dname)) { - Dentry *olddn = diri->dir->dentries[dname]; + if (effective_dir->dentries.count(dname)) { + Dentry *olddn = effective_dir->dentries[dname]; if (olddn->inode != in) { // replace incorrect dentry unlink(olddn, true, true); // keep dir, dentry - dn = link(dir, dname, in, olddn); + dn = link(effective_dir, dname, in, olddn); ceph_assert(dn == olddn); } else { // keep existing dn @@ -1401,13 +1434,13 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, } } else { // new dn - dn = link(dir, dname, in, NULL); + dn = link(effective_dir, dname, in, NULL); } dn->alternate_name = std::move(dlease.alternate_name); update_dentry_lease(dn, &dlease, request->sent_stamp, session); if (hash_order) { - unsigned hash = 
ceph_frag_value(diri->hash_dentry_name(dname)); + unsigned hash = ceph_frag_value(effective_diri->hash_dentry_name(dname)); if (hash != last_hash) readdir_offset = 2; last_hash = hash; @@ -1416,20 +1449,21 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, dn->offset = dir_result_t::make_fpos(fg, readdir_offset++, false); } // add to readdir cache - if (dirp->release_count == diri->dir_release_count && - dirp->ordered_count == diri->dir_ordered_count && - dirp->start_shared_gen == diri->shared_gen) { - if (dirp->cache_index == dir->readdir_cache.size()) { + if (!snapdiff_req && + dirp->release_count == effective_diri->dir_release_count && + dirp->ordered_count == effective_diri->dir_ordered_count && + dirp->start_shared_gen == effective_diri->shared_gen) { + if (dirp->cache_index == effective_dir->readdir_cache.size()) { if (i == 0) { ceph_assert(!dirp->inode->is_complete_and_ordered()); dir->readdir_cache.reserve(dirp->cache_index + numdn); } - dir->readdir_cache.push_back(dn); - } else if (dirp->cache_index < dir->readdir_cache.size()) { + effective_dir->readdir_cache.push_back(dn); + } else if (dirp->cache_index < effective_dir->readdir_cache.size()) { if (dirp->inode->is_complete_and_ordered()) - ceph_assert(dir->readdir_cache[dirp->cache_index] == dn); + ceph_assert(effective_dir->readdir_cache[dirp->cache_index] == dn); else - dir->readdir_cache[dirp->cache_index] = dn; + effective_dir->readdir_cache[dirp->cache_index] = dn; } else { ceph_abort_msg("unexpected readdir buffer idx"); } @@ -1449,6 +1483,8 @@ void Client::insert_readdir_results(MetaRequest *request, MetaSession *session, if (dir->is_empty()) close_dir(dir); + if (dir_other && dir_other->is_empty()) + close_dir(dir_other); } } @@ -1608,10 +1644,20 @@ Inode* Client::insert_trace(MetaRequest *request, MetaSession *session) if (in) { if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) { - insert_readdir_results(request, session, in); + insert_readdir_results(request, + session, + in, + nullptr); } else if (op == CEPH_MDS_OP_LOOKUPNAME) { // hack: return parent inode instead in = diri; + } else if (op == CEPH_MDS_OP_READDIR_SNAPDIFF) { + // provide both request's inode (aka snapA) and traced one (snapB) + // to properly match snapdiff results + insert_readdir_results(request, + session, + request->inode(), + in); } if (request->dentry() == NULL && in != request->inode()) { @@ -1685,7 +1731,7 @@ mds_rank_t Client::choose_target_mds(MetaRequest *req, Inode** phash_diri) * I think the MDS should be able to redirect as needed*/ in = in->get_first_parent()->dir->parent_inode; else { - ldout(cct, 10) << "got unlinked inode, can't look at parent" << dendl; + ldout(cct, 10) << __func__ << "got unlinked inode, can't look at parent" << dendl; break; } } @@ -2333,6 +2379,12 @@ void Client::_closed_mds_session(MetaSession *s, int err, bool rejected) mds_sessions.erase(s->mds_num); } +static void reinit_mds_features(MetaSession *session, + const MConstRef& m) { + session->mds_features = std::move(m->supported_features); + session->mds_metric_flags = std::move(m->metric_spec.metric_flags); +} + void Client::handle_client_session(const MConstRef& m) { mds_rank_t from = mds_rank_t(m->get_source().num()); @@ -2351,6 +2403,13 @@ void Client::handle_client_session(const MConstRef& m) if (session->state == MetaSession::STATE_OPEN) { ldout(cct, 10) << "mds." << from << " already opened, ignore it" << dendl; + // The MDS could send a client_session(open) message even when + // the session state is STATE_OPEN. 
Normally, its fine to + // ignore this message, but, if the MDS sent this message just + // after it got upgraded, the MDS feature bits could differ + // than the one before the upgrade - so, refresh the feature + // bits the client holds. + reinit_mds_features(session.get(), m); return; } /* @@ -2360,8 +2419,7 @@ void Client::handle_client_session(const MConstRef& m) if (!session->seq && m->get_seq()) session->seq = m->get_seq(); - session->mds_features = std::move(m->supported_features); - session->mds_metric_flags = std::move(m->metric_spec.metric_flags); + reinit_mds_features(session.get(), m); renew_caps(session.get()); session->state = MetaSession::STATE_OPEN; @@ -2546,7 +2604,7 @@ ref_t Client::build_client_request(MetaRequest *request, mds_ran } } - auto req = make_message(request->get_op(), old_version); + auto req = make_message(request->get_op(), session->mds_features); req->set_tid(request->tid); req->set_stamp(request->op_stamp); memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head)); @@ -3421,12 +3479,17 @@ Dentry* Client::link(Dir *dir, const string& name, Inode *in, Dentry *dn) lru.lru_insert_mid(dn); // mid or top? - ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in - << " dn " << dn << " (new dn)" << dendl; + if(in) { + ldout(cct, 15) << "link dir " << *dir->parent_inode << " '" << name << "' to inode " << *in + << " dn " << *dn << " (new dn)" << dendl; + } else { + ldout(cct, 15) << "link dir " << *dir->parent_inode << " '" << name << "' " + << " dn " << *dn << " (new dn)" << dendl; + } } else { ceph_assert(!dn->inode); - ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in - << " dn " << dn << " (old dn)" << dendl; + ldout(cct, 15) << "link dir " << *dir->parent_inode << " '" << name << "' to inode " << in + << " dn " << *dn << " (old dn)" << dendl; } if (in) { // link to inode @@ -3529,7 +3592,7 @@ void Client::put_cap_ref(Inode *in, int cap) int put_nref = 0; int drop = last & ~in->caps_issued(); if (in->snapid == CEPH_NOSNAP) { - if ((last & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER)) && + if ((last & CEPH_CAP_FILE_WR) && !in->cap_snaps.empty() && in->cap_snaps.rbegin()->second.writing) { ldout(cct, 10) << __func__ << " finishing pending cap_snap on " << *in << dendl; @@ -3543,6 +3606,10 @@ void Client::put_cap_ref(Inode *in, int cap) signal_cond_list(in->waitfor_commit); ldout(cct, 5) << __func__ << " dropped last FILE_BUFFER ref on " << *in << dendl; ++put_nref; + + if (!in->cap_snaps.empty()) { + flush_snaps(in); + } } } if (last & CEPH_CAP_FILE_CACHE) { @@ -4003,15 +4070,13 @@ void Client::queue_cap_snap(Inode *in, SnapContext& old_snapc) in->cap_snaps.rbegin()->second.writing) { ldout(cct, 10) << __func__ << " already have pending cap_snap on " << *in << dendl; return; - } else if (in->caps_dirty() || - (used & CEPH_CAP_FILE_WR) || - (dirty & CEPH_CAP_ANY_WR)) { + } else if (dirty || (used & CEPH_CAP_FILE_WR)) { const auto &capsnapem = in->cap_snaps.emplace(std::piecewise_construct, std::make_tuple(old_snapc.seq), std::make_tuple(in)); ceph_assert(capsnapem.second); /* element inserted */ CapSnap &capsnap = capsnapem.first->second; capsnap.context = old_snapc; capsnap.issued = in->caps_issued(); - capsnap.dirty = in->caps_dirty(); + capsnap.dirty = dirty; capsnap.dirty_data = (used & CEPH_CAP_FILE_BUFFER); @@ -4058,9 +4123,11 @@ void Client::finish_cap_snap(Inode *in, CapSnap &capsnap, int used) } if (used & CEPH_CAP_FILE_BUFFER) { - capsnap.writing = 1; ldout(cct, 10) << 
__func__ << " " << *in << " cap_snap " << &capsnap << " used " << used - << " WRBUFFER, delaying" << dendl; + << " WRBUFFER, trigger to flush dirty buffer" << dendl; + + /* trigger to flush the buffer */ + _flush(in, new C_Client_FlushComplete(this, in)); } else { capsnap.dirty_data = 0; flush_snaps(in); @@ -4412,11 +4479,19 @@ void Client::add_update_cap(Inode *in, MetaSession *mds_session, uint64_t cap_id if (flags & CEPH_CAP_FLAG_AUTH) { if (in->auth_cap != &cap && (!in->auth_cap || ceph_seq_cmp(in->auth_cap->mseq, mseq) < 0)) { - if (in->auth_cap && in->flushing_cap_item.is_on_list()) { - ldout(cct, 10) << __func__ << " changing auth cap: " - << "add myself to new auth MDS' flushing caps list" << dendl; - adjust_session_flushing_caps(in, in->auth_cap->session, mds_session); + if (in->auth_cap) { + if (in->flushing_cap_item.is_on_list()) { + ldout(cct, 10) << __func__ << " changing auth cap: " + << "add myself to new auth MDS' flushing caps list" << dendl; + adjust_session_flushing_caps(in, in->auth_cap->session, mds_session); + } + if (in->dirty_cap_item.is_on_list()) { + ldout(cct, 10) << __func__ << " changing auth cap: " + << "add myself to new auth MDS' dirty caps list" << dendl; + mds_session->get_dirty_list().push_back(&in->dirty_cap_item); + } } + in->auth_cap = ∩ } } @@ -5263,24 +5338,48 @@ void Client::handle_caps(const MConstRef& m) got_mds_push(session.get()); + bool do_cap_release = false; Inode *in; vinodeno_t vino(m->get_ino(), CEPH_NOSNAP); if (auto it = inode_map.find(vino); it != inode_map.end()) { in = it->second; + + /* MDS maybe waiting for cap release with increased seq */ + switch (m->get_op()) { + case CEPH_CAP_OP_REVOKE: + case CEPH_CAP_OP_GRANT: + if (!in->caps.count(mds)) { + do_cap_release = true; + ldout(cct, 5) << __func__ << " vino " << vino << " don't have cap " + << m->get_cap_id() << " op " << m->get_op() + << ", immediately releasing" << dendl; + } + } } else { - if (m->get_op() == CEPH_CAP_OP_IMPORT) { - ldout(cct, 5) << __func__ << " don't have vino " << vino << " on IMPORT, immediately releasing" << dendl; - session->enqueue_cap_release( - m->get_ino(), - m->get_cap_id(), - m->get_seq(), - m->get_mseq(), - cap_epoch_barrier); - } else { - ldout(cct, 5) << __func__ << " don't have vino " << vino << ", dropping" << dendl; + /* MDS maybe waiting for cap release with increased seq */ + switch (m->get_op()) { + case CEPH_CAP_OP_IMPORT: + case CEPH_CAP_OP_REVOKE: + case CEPH_CAP_OP_GRANT: + do_cap_release = true; + ldout(cct, 5) << __func__ << " don't have vino " << vino << " op " + << m->get_op() << ", immediately releasing" << dendl; + break; + default: + ldout(cct, 5) << __func__ << " don't have vino " << vino << ", dropping" << dendl; + return; } + } + + // In case the mds is waiting on e.g. a revocation + if (do_cap_release) { + session->enqueue_cap_release( + m->get_ino(), + m->get_cap_id(), + m->get_seq(), + m->get_mseq(), + cap_epoch_barrier); - // in case the mds is waiting on e.g. 
a revocation flush_cap_releases(); return; } @@ -5757,6 +5856,13 @@ void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const M } } + // just in case the caps was released just before we get the revoke msg + if (!check && m->get_op() == CEPH_CAP_OP_REVOKE) { + cap->wanted = 0; // don't let check_caps skip sending a response to MDS + check = true; + flags = CHECK_CAPS_NODELAY; + } + if (check) check_caps(in, flags); @@ -6888,6 +6994,13 @@ void Client::collect_and_send_global_metrics() { ldout(cct, 20) << __func__ << dendl; ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); + /* Do not send the metrics until the MDS rank is ready */ + if (!mdsmap->is_active((mds_rank_t)0)) { + ldout(cct, 5) << __func__ << " MDS rank 0 is not ready yet -- not sending metric" + << dendl; + return; + } + if (!have_open_session((mds_rank_t)0)) { ldout(cct, 5) << __func__ << ": no session with rank=0 -- not sending metric" << dendl; @@ -7059,7 +7172,8 @@ bool Client::_dentry_valid(const Dentry *dn) } int Client::_lookup(Inode *dir, const string& dname, int mask, InodeRef *target, - const UserPerm& perms, std::string* alternate_name) + const UserPerm& perms, std::string* alternate_name, + bool is_rename) { int r = 0; Dentry *dn = NULL; @@ -7138,6 +7252,19 @@ relookup: } else { ldout(cct, 20) << " no cap on " << dn->inode->vino() << dendl; } + + // In rare case during the rename if another thread tries to + // lookup the dst dentry, it may get an inconsistent result + // that both src dentry and dst dentry will link to the same + // inode at the same time. + // Will wait the rename to finish and try it again. + if (!is_rename && dn->is_renaming) { + ldout(cct, 1) << __func__ << " dir " << *dir + << " rename is on the way, will wait for dn '" + << dname << "'" << dendl; + wait_on_list(waiting_for_rename); + goto relookup; + } } else { // can we conclude ENOENT locally? 
if (dir->caps_issued_mask(CEPH_CAP_FILE_SHARED, true) && @@ -9055,7 +9182,8 @@ void Client::_readdir_drop_dirp_buffer(dir_result_t *dirp) dirp->buffer.clear(); } -int Client::_readdir_get_frag(dir_result_t *dirp) +int Client::_readdir_get_frag(int op, dir_result_t* dirp, + fill_readdir_args_cb_t fill_req_cb) { ceph_assert(dirp); ceph_assert(dirp->inode); @@ -9070,33 +9198,18 @@ int Client::_readdir_get_frag(dir_result_t *dirp) ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino << " fg " << fg << " offset " << hex << dirp->offset << dec << dendl; - int op = CEPH_MDS_OP_READDIR; - if (dirp->inode && dirp->inode->snapid == CEPH_SNAPDIR) - op = CEPH_MDS_OP_LSSNAP; - InodeRef& diri = dirp->inode; MetaRequest *req = new MetaRequest(op); - filepath path; - diri->make_nosnap_relative_path(path); - req->set_filepath(path); - req->set_inode(diri.get()); - req->head.args.readdir.frag = fg; - req->head.args.readdir.flags = CEPH_READDIR_REPLY_BITFLAGS; - if (dirp->last_name.length()) { - req->path2.set_path(dirp->last_name); - } else if (dirp->hash_order()) { - req->head.args.readdir.offset_hash = dirp->offset_high(); - } - req->dirp = dirp; - + fill_req_cb(dirp, req, diri, fg); + bufferlist dirbl; int res = make_request(req, dirp->perms, NULL, NULL, -1, &dirbl); if (res == -CEPHFS_EAGAIN) { ldout(cct, 10) << __func__ << " got EAGAIN, retrying" << dendl; _readdir_rechoose_frag(dirp); - return _readdir_get_frag(dirp); + return _readdir_get_frag(op, dirp, fill_req_cb); } if (res == 0) { @@ -9121,7 +9234,8 @@ int Client::_readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p, { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino - << " last_name " << dirp->last_name << " offset " << hex << dirp->offset << dec + << " last_name " << dirp->last_name + << " offset " << hex << dirp->offset << dec << dendl; Dir *dir = dirp->inode->dir; @@ -9210,8 +9324,57 @@ int Client::_readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p, return 0; } -int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p, - unsigned want, unsigned flags, bool getref) +int Client::readdir_r_cb(dir_result_t* d, + add_dirent_cb_t cb, + void* p, + unsigned want, + unsigned flags, + bool getref) +{ + auto fill_readdir_cb = [](dir_result_t* dirp, + MetaRequest* req, + InodeRef& diri, + frag_t fg) { + filepath path; + diri->make_nosnap_relative_path(path); + req->set_filepath(path); + req->set_inode(diri.get()); + req->head.args.readdir.frag = fg; + req->head.args.readdir.flags = CEPH_READDIR_REPLY_BITFLAGS; + if (dirp->last_name.length()) { + req->path2.set_path(dirp->last_name); + } else if (dirp->hash_order()) { + req->head.args.readdir.offset_hash = dirp->offset_high(); + } + req->dirp = dirp; + }; + int op = CEPH_MDS_OP_READDIR; + if (d->inode && d->inode->snapid == CEPH_SNAPDIR) + op = CEPH_MDS_OP_LSSNAP; + return _readdir_r_cb(op, + d, + cb, + fill_readdir_cb, + p, + want, + flags, + getref, + false); +} + +// +// NB: this is used for both readdir and readdir_snapdiff results processing +// hence it should be request type agnostic +// +int Client::_readdir_r_cb(int op, + dir_result_t *d, + add_dirent_cb_t cb, + fill_readdir_args_cb_t fill_cb, + void *p, + unsigned want, + unsigned flags, + bool getref, + bool bypass_cache) { int caps = statx_to_mask(flags, want); @@ -9301,12 +9464,14 @@ int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p, } // can we read from our cache? 
- ldout(cct, 10) << "offset " << hex << dirp->offset << dec + ldout(cct, 10) << __func__ + << " offset " << hex << dirp->offset << dec << " snapid " << dirp->inode->snapid << " (complete && ordered) " << dirp->inode->is_complete_and_ordered() << " issued " << ccap_string(dirp->inode->caps_issued()) << dendl; - if (dirp->inode->snapid != CEPH_SNAPDIR && + if (!bypass_cache && + dirp->inode->snapid != CEPH_SNAPDIR && dirp->inode->is_complete_and_ordered() && dirp->inode->caps_issued_mask(CEPH_CAP_FILE_SHARED, true)) { int err = _readdir_cache_cb(dirp, cb, p, caps, getref); @@ -9320,7 +9485,7 @@ int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p, bool check_caps = true; if (!dirp->is_cached()) { - int r = _readdir_get_frag(dirp); + int r = _readdir_get_frag(op, dirp, fill_cb); if (r) return r; // _readdir_get_frag () may updates dirp->offset if the replied dirfrag is @@ -9329,7 +9494,8 @@ int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p, } frag_t fg = dirp->buffer_frag; - ldout(cct, 10) << "frag " << fg << " buffer size " << dirp->buffer.size() + ldout(cct, 10) << __func__ + << " frag " << fg << " buffer size " << dirp->buffer.size() << " offset " << hex << dirp->offset << dendl; for (auto it = std::lower_bound(dirp->buffer.begin(), dirp->buffer.end(), @@ -9364,7 +9530,9 @@ int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p, r = cb(p, &de, &stx, next_off, inode); // _next_ offset cl.lock(); - ldout(cct, 15) << " de " << de.d_name << " off " << hex << next_off - 1 << dec + ldout(cct, 15) << __func__ + << " de " << de.d_name << " off " << hex << next_off - 1 << dec + << " snap " << entry.inode->snapid << " = " << r << dendl; if (r < 0) return r; @@ -9386,7 +9554,8 @@ int Client::readdir_r_cb(dir_result_t *d, add_dirent_cb_t cb, void *p, continue; } - if (diri->shared_gen == dirp->start_shared_gen && + if (!bypass_cache && + diri->shared_gen == dirp->start_shared_gen && diri->dir_release_count == dirp->release_count) { if (diri->dir_ordered_count == dirp->ordered_count) { ldout(cct, 10) << " marking (I_COMPLETE|I_DIR_ORDERED) on " << *diri << dendl; @@ -9492,6 +9661,81 @@ int Client::readdirplus_r(dir_result_t *d, struct dirent *de, return 0; } +int Client::readdir_snapdiff(dir_result_t* d1, snapid_t snap2, + struct dirent* out_de, + snapid_t* out_snap) +{ + if (!d1 || !d1->inode || d1->inode->snapid == snap2) { + lderr(cct) << __func__ << " invalid parameters: " + << " d1:" << d1 + << " d1->inode:" << (d1 ? d1->inode : nullptr) + << " snap2 id :" << snap2 + << dendl; + errno = EINVAL; + return -errno; + } + + auto& de = d1->de; + ceph_statx stx; + single_readdir sr; + sr.de = &de; + sr.stx = &stx; + sr.inode = NULL; + sr.full = false; + + auto fill_snapdiff_cb = [&](dir_result_t* dirp, + MetaRequest* req, + InodeRef& diri, + frag_t fg) { + filepath path; + diri->make_nosnap_relative_path(path); + req->set_filepath(path); + req->set_inode(diri.get()); + req->head.args.snapdiff.snap_other = snap2; + req->head.args.snapdiff.frag = fg; + req->head.args.snapdiff.flags = CEPH_READDIR_REPLY_BITFLAGS; + if (dirp->last_name.length()) { + req->path2.set_path(dirp->last_name); + } else if (dirp->hash_order()) { + req->head.args.snapdiff.offset_hash = dirp->offset_high(); + } + req->dirp = dirp; + }; + + // our callback fills the dirent and sets sr.full=true on first + // call, and returns -1 the second time around. 
+ int ret = _readdir_r_cb(CEPH_MDS_OP_READDIR_SNAPDIFF, + d1, + _readdir_single_dirent_cb, + fill_snapdiff_cb, + (void*)&sr, + 0, + AT_STATX_DONT_SYNC, + false, + true); + if (ret < -1) { + lderr(cct) << __func__ << " error: " + << cpp_strerror(ret) + << dendl; + errno = -ret; // this sucks. + return ret; + } + + ldout(cct, 15) << __func__ << " " << ret + << " " << sr.de->d_name + << " " << stx.stx_dev + << dendl; + if (sr.full) { + if (out_de) { + *out_de = de; + } + if (out_snap) { + *out_snap = stx.stx_dev; + } + return 1; + } + return 0; +} /* getdents */ struct getdents_result { @@ -13569,6 +13813,8 @@ int Client::_mknod(Inode *dir, const char *name, mode_t mode, dev_t rdev, MetaRequest *req = new MetaRequest(CEPH_MDS_OP_MKNOD); + req->set_inode_owner_uid_gid(perms.uid(), perms.gid()); + filepath path; dir->make_nosnap_relative_path(path); path.push_dentry(name); @@ -13713,6 +13959,8 @@ int Client::_create(Inode *dir, const char *name, int flags, mode_t mode, MetaRequest *req = new MetaRequest(CEPH_MDS_OP_CREATE); + req->set_inode_owner_uid_gid(perms.uid(), perms.gid()); + filepath path; dir->make_nosnap_relative_path(path); path.push_dentry(name); @@ -13790,6 +14038,9 @@ int Client::_mkdir(Inode *dir, const char *name, mode_t mode, const UserPerm& pe MetaRequest *req = new MetaRequest(is_snap_op ? CEPH_MDS_OP_MKSNAP : CEPH_MDS_OP_MKDIR); + if (!is_snap_op) + req->set_inode_owner_uid_gid(perm.uid(), perm.gid()); + filepath path; dir->make_nosnap_relative_path(path); path.push_dentry(name); @@ -13928,6 +14179,8 @@ int Client::_symlink(Inode *dir, const char *name, const char *target, MetaRequest *req = new MetaRequest(CEPH_MDS_OP_SYMLINK); + req->set_inode_owner_uid_gid(perms.uid(), perms.gid()); + filepath path; dir->make_nosnap_relative_path(path); path.push_dentry(name); @@ -14180,11 +14433,13 @@ int Client::_rename(Inode *fromdir, const char *fromname, Inode *todir, const ch else return -CEPHFS_EROFS; } + + // don't allow cross-quota renames if (cct->_conf.get_val("client_quota") && fromdir != todir) { Inode *fromdir_root = - fromdir->quota.is_enabled(QUOTA_MAX_FILES) ? fromdir : get_quota_root(fromdir, perm, QUOTA_MAX_FILES); + fromdir->quota.is_enabled() ? fromdir : get_quota_root(fromdir, perm); Inode *todir_root = - todir->quota.is_enabled(QUOTA_MAX_FILES) ? todir : get_quota_root(todir, perm, QUOTA_MAX_FILES); + todir->quota.is_enabled() ? 
todir : get_quota_root(todir, perm); if (fromdir_root != todir_root) { return -CEPHFS_EXDEV; } @@ -14212,12 +14467,13 @@ int Client::_rename(Inode *fromdir, const char *fromname, Inode *todir, const ch req->old_dentry_drop = CEPH_CAP_FILE_SHARED; req->old_dentry_unless = CEPH_CAP_FILE_EXCL; + de->is_renaming = true; req->set_dentry(de); req->dentry_drop = CEPH_CAP_FILE_SHARED; req->dentry_unless = CEPH_CAP_FILE_EXCL; InodeRef oldin, otherin; - res = _lookup(fromdir, fromname, 0, &oldin, perm); + res = _lookup(fromdir, fromname, 0, &oldin, perm, nullptr, true); if (res < 0) goto fail; @@ -14226,7 +14482,7 @@ int Client::_rename(Inode *fromdir, const char *fromname, Inode *todir, const ch req->set_old_inode(oldinode); req->old_inode_drop = CEPH_CAP_LINK_SHARED; - res = _lookup(todir, toname, 0, &otherin, perm); + res = _lookup(todir, toname, 0, &otherin, perm, nullptr, true); switch (res) { case 0: { @@ -14255,6 +14511,12 @@ int Client::_rename(Inode *fromdir, const char *fromname, Inode *todir, const ch res = make_request(req, perm, &target); ldout(cct, 10) << "rename result is " << res << dendl; + // if rename fails it will miss waking up the waiters + if (op == CEPH_MDS_OP_RENAME && de->is_renaming) { + de->is_renaming = false; + signal_cond_list(waiting_for_rename); + } + // renamed item from our cache trim_cache(); diff --git a/ceph/src/client/Client.h b/ceph/src/client/Client.h index 97c2ad733..911a8b460 100644 --- a/ceph/src/client/Client.h +++ b/ceph/src/client/Client.h @@ -362,6 +362,13 @@ public: int readdir_r(dir_result_t *dirp, struct dirent *de); int readdirplus_r(dir_result_t *dirp, struct dirent *de, struct ceph_statx *stx, unsigned want, unsigned flags, Inode **out); + /* + * Get the next snapshot delta entry. + * + */ + int readdir_snapdiff(dir_result_t* dir1, snapid_t snap2, + struct dirent* out_de, snapid_t* out_snap); + int getdir(const char *relpath, std::list& names, const UserPerm& perms); // get the whole dir at once. 
@@ -791,7 +798,8 @@ public: void update_dir_dist(Inode *in, DirStat *st, mds_rank_t from); void clear_dir_complete_and_ordered(Inode *diri, bool complete); - void insert_readdir_results(MetaRequest *request, MetaSession *session, Inode *diri); + void insert_readdir_results(MetaRequest *request, MetaSession *session, + Inode *diri, Inode *diri_other); Inode* insert_trace(MetaRequest *request, MetaSession *session); void update_inode_file_size(Inode *in, int issued, uint64_t size, uint64_t truncate_seq, uint64_t truncate_size); @@ -1271,6 +1279,8 @@ private: MAY_READ = 4, }; + typedef std::function fill_readdir_args_cb_t; + std::unique_ptr> cct_deleter; /* Flags for VXattr */ @@ -1291,8 +1301,19 @@ private: bool _readdir_have_frag(dir_result_t *dirp); void _readdir_next_frag(dir_result_t *dirp); void _readdir_rechoose_frag(dir_result_t *dirp); - int _readdir_get_frag(dir_result_t *dirp); + int _readdir_get_frag(int op, dir_result_t *dirp, + fill_readdir_args_cb_t fill_req_cb); int _readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p, int caps, bool getref); + int _readdir_r_cb(int op, + dir_result_t* d, + add_dirent_cb_t cb, + fill_readdir_args_cb_t fill_cb, + void* p, + unsigned want, + unsigned flags, + bool getref, + bool bypass_cache); + void _closedir(dir_result_t *dirp); // other helpers @@ -1320,7 +1341,8 @@ private: const UserPerm& perms); int _lookup(Inode *dir, const std::string& dname, int mask, InodeRef *target, - const UserPerm& perm, std::string* alternate_name=nullptr); + const UserPerm& perm, std::string* alternate_name=nullptr, + bool is_rename=false); int _link(Inode *in, Inode *dir, const char *name, const UserPerm& perm, std::string alternate_name, InodeRef *inp = 0); @@ -1573,6 +1595,8 @@ private: std::map, int> pool_perms; std::list waiting_for_pool_perm; + std::list waiting_for_rename; + uint64_t retries_on_invalidate = 0; // state reclaim diff --git a/ceph/src/client/Dentry.h b/ceph/src/client/Dentry.h index 94722c5de..8003dfed3 100644 --- a/ceph/src/client/Dentry.h +++ b/ceph/src/client/Dentry.h @@ -91,6 +91,7 @@ public: ceph_seq_t lease_seq = 0; int cap_shared_gen = 0; std::string alternate_name; + bool is_renaming = false; private: xlist::item inode_xlist_link; diff --git a/ceph/src/client/MetaRequest.cc b/ceph/src/client/MetaRequest.cc index 3994424e7..6d709db58 100644 --- a/ceph/src/client/MetaRequest.cc +++ b/ceph/src/client/MetaRequest.cc @@ -51,6 +51,9 @@ void MetaRequest::dump(Formatter *f) const f->dump_unsigned("num_releases", head.num_releases); f->dump_int("abort_rc", abort_rc); + + f->dump_unsigned("owner_uid", head.owner_uid); + f->dump_unsigned("owner_gid", head.owner_gid); } MetaRequest::~MetaRequest() diff --git a/ceph/src/client/MetaRequest.h b/ceph/src/client/MetaRequest.h index a1c9f9459..49ee6dc6e 100644 --- a/ceph/src/client/MetaRequest.h +++ b/ceph/src/client/MetaRequest.h @@ -80,6 +80,8 @@ public: unsafe_target_item(this) { memset(&head, 0, sizeof(head)); head.op = op; + head.owner_uid = -1; + head.owner_gid = -1; } ~MetaRequest(); @@ -153,6 +155,13 @@ public: return v == 0; } + void set_inode_owner_uid_gid(unsigned u, unsigned g) { + /* it makes sense to set owner_{u,g}id only for OPs which create inodes */ + ceph_assert(IS_CEPH_MDS_OP_NEWINODE(head.op)); + head.owner_uid = u; + head.owner_gid = g; + } + // normal fields void set_tid(ceph_tid_t t) { tid = t; } void set_oldest_client_tid(ceph_tid_t t) { head.oldest_client_tid = t; } diff --git a/ceph/src/client/MetaSession.h b/ceph/src/client/MetaSession.h index 
ad74ae58a..301306263 100644 --- a/ceph/src/client/MetaSession.h +++ b/ceph/src/client/MetaSession.h @@ -62,6 +62,13 @@ struct MetaSession { MetaSession(mds_rank_t mds_num, ConnectionRef con, const entity_addrvec_t& addrs) : mds_num(mds_num), con(con), addrs(addrs) { } + ~MetaSession() { + ceph_assert(caps.empty()); + ceph_assert(dirty_list.empty()); + ceph_assert(flushing_caps.empty()); + ceph_assert(requests.empty()); + ceph_assert(unsafe_requests.empty()); + } xlist &get_dirty_list() { return dirty_list; } diff --git a/ceph/src/cls/rgw/cls_rgw.cc b/ceph/src/cls/rgw/cls_rgw.cc index 0e8d53e94..a7e1b65e8 100644 --- a/ceph/src/cls/rgw/cls_rgw.cc +++ b/ceph/src/cls/rgw/cls_rgw.cc @@ -676,77 +676,6 @@ int rgw_bucket_list(cls_method_context_t hctx, bufferlist *in, bufferlist *out) } } // rgw_bucket_list - -static int check_index(cls_method_context_t hctx, - rgw_bucket_dir_header *existing_header, - rgw_bucket_dir_header *calc_header) -{ - int rc = read_bucket_header(hctx, existing_header); - if (rc < 0) { - CLS_LOG(1, "ERROR: check_index(): failed to read header\n"); - return rc; - } - - calc_header->tag_timeout = existing_header->tag_timeout; - calc_header->ver = existing_header->ver; - calc_header->syncstopped = existing_header->syncstopped; - - map keys; - string start_obj; - string filter_prefix; - -#define CHECK_CHUNK_SIZE 1000 - bool done = false; - bool more; - - do { - rc = get_obj_vals(hctx, start_obj, filter_prefix, CHECK_CHUNK_SIZE, &keys, &more); - if (rc < 0) - return rc; - - for (auto kiter = keys.begin(); kiter != keys.end(); ++kiter) { - if (!bi_is_plain_entry(kiter->first)) { - done = true; - break; - } - - rgw_bucket_dir_entry entry; - auto eiter = kiter->second.cbegin(); - try { - decode(entry, eiter); - } catch (ceph::buffer::error& err) { - CLS_LOG(1, "ERROR: rgw_bucket_list(): failed to decode entry, key=%s", kiter->first.c_str()); - return -EIO; - } - if (entry.exists) { - rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category]; - stats.num_entries++; - stats.total_size += entry.meta.accounted_size; - stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); - stats.actual_size += entry.meta.size; - } - - start_obj = kiter->first; - } - } while (keys.size() == CHECK_CHUNK_SIZE && !done); - - return 0; -} - -int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out) -{ - CLS_LOG(10, "entered %s", __func__); - rgw_cls_check_index_ret ret; - - int rc = check_index(hctx, &ret.existing_header, &ret.calculated_header); - if (rc < 0) - return rc; - - encode(ret, *out); - - return 0; -} - static int write_bucket_header(cls_method_context_t hctx, rgw_bucket_dir_header *header) { header->ver++; @@ -757,18 +686,6 @@ static int write_bucket_header(cls_method_context_t hctx, rgw_bucket_dir_header } -int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out) -{ - CLS_LOG(10, "entered %s", __func__); - rgw_bucket_dir_header existing_header; - rgw_bucket_dir_header calc_header; - int rc = check_index(hctx, &existing_header, &calc_header); - if (rc < 0) - return rc; - - return write_bucket_header(hctx, &calc_header); -} - int rgw_bucket_update_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { CLS_LOG(10, "entered %s", __func__); @@ -1898,6 +1815,9 @@ static int rgw_bucket_link_olh(cls_method_context_t hctx, bufferlist *in, buffer return ret; } olh.set_tag(op.olh_tag); + if (op.key.instance.empty()){ + obj.set_epoch(1); + } } /* update the olh log */ @@ -3133,6 +3053,115 
@@ static int list_olh_entries(cls_method_context_t hctx, return count; } +static int check_index(cls_method_context_t hctx, + rgw_bucket_dir_header *existing_header, + rgw_bucket_dir_header *calc_header) +{ + int rc = read_bucket_header(hctx, existing_header); + if (rc < 0) { + CLS_LOG(1, "ERROR: check_index(): failed to read header\n"); + return rc; + } + + calc_header->tag_timeout = existing_header->tag_timeout; + calc_header->ver = existing_header->ver; + calc_header->syncstopped = existing_header->syncstopped; + + std::list entries; + string start_obj; + string filter_prefix; + +#define CHECK_CHUNK_SIZE 1000 + bool more; + + do { + rc = list_plain_entries(hctx, filter_prefix, start_obj, CHECK_CHUNK_SIZE, &entries, &more); + if (rc < 0) { + return rc; + } + + for (const auto & bientry : entries) { + rgw_bucket_dir_entry entry; + auto diter = bientry.data.cbegin(); + try { + decode(entry, diter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR:check_index(): failed to decode entry, key=%s", bientry.idx.c_str()); + return -EIO; + } + + if (entry.exists && entry.flags == 0) { + rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category]; + stats.num_entries++; + stats.total_size += entry.meta.accounted_size; + stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); + stats.actual_size += entry.meta.size; + } + start_obj = bientry.idx; + } + entries.clear(); + } while (more); + + start_obj = ""; + do { + rc = list_instance_entries(hctx, filter_prefix, start_obj, CHECK_CHUNK_SIZE, &entries, &more); + if (rc < 0) { + return rc; + } + + for (const auto & bientry : entries) { + rgw_bucket_dir_entry entry; + auto diter = bientry.data.cbegin(); + try { + decode(entry, diter); + } catch (ceph::buffer::error& err) { + CLS_LOG(1, "ERROR:check_index(): failed to decode entry, key=%s", bientry.idx.c_str()); + return -EIO; + } + + if (entry.exists) { + rgw_bucket_category_stats& stats = calc_header->stats[entry.meta.category]; + stats.num_entries++; + stats.total_size += entry.meta.accounted_size; + stats.total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); + stats.actual_size += entry.meta.size; + } + start_obj = bientry.idx; + } + entries.clear(); + } while (more); + + return 0; +} + +int rgw_bucket_rebuild_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + rgw_bucket_dir_header existing_header; + rgw_bucket_dir_header calc_header; + int rc = check_index(hctx, &existing_header, &calc_header); + if (rc < 0) + return rc; + + return write_bucket_header(hctx, &calc_header); +} + + +int rgw_bucket_check_index(cls_method_context_t hctx, bufferlist *in, bufferlist *out) +{ + CLS_LOG(10, "entered %s", __func__); + rgw_cls_check_index_ret ret; + + int rc = check_index(hctx, &ret.existing_header, &ret.calculated_header); + if (rc < 0) + return rc; + + encode(ret, *out); + + return 0; +} + + /* Lists all the entries that appear in a bucket index listing. 
* * It may not be obvious why this function calls three other "segment" diff --git a/ceph/src/cls/rgw/cls_rgw_types.cc b/ceph/src/cls/rgw/cls_rgw_types.cc index 14e0d9884..3a71860c3 100644 --- a/ceph/src/cls/rgw/cls_rgw_types.cc +++ b/ceph/src/cls/rgw/cls_rgw_types.cc @@ -371,39 +371,31 @@ bool rgw_cls_bi_entry::get_info(cls_rgw_obj_key *key, RGWObjCategory *category, rgw_bucket_category_stats *accounted_stats) { - bool account = false; - auto iter = data.cbegin(); using ceph::decode; - switch (type) { - case BIIndexType::Plain: - account = true; - // NO BREAK; falls through to case InstanceIdx: - case BIIndexType::Instance: - { - rgw_bucket_dir_entry entry; - decode(entry, iter); - account = (account && entry.exists); - *key = entry.key; - *category = entry.meta.category; - accounted_stats->num_entries++; - accounted_stats->total_size += entry.meta.accounted_size; - accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); - accounted_stats->actual_size += entry.meta.size; - } - break; - case BIIndexType::OLH: - { - rgw_bucket_olh_entry entry; - decode(entry, iter); - *key = entry.key; - } - break; - default: - break; + auto iter = data.cbegin(); + if (type == BIIndexType::OLH) { + rgw_bucket_olh_entry entry; + decode(entry, iter); + *key = entry.key; + return false; } - return account; + rgw_bucket_dir_entry entry; + decode(entry, iter); + *key = entry.key; + *category = entry.meta.category; + accounted_stats->num_entries++; + accounted_stats->total_size += entry.meta.accounted_size; + accounted_stats->total_size_rounded += cls_rgw_get_rounded_size(entry.meta.accounted_size); + accounted_stats->actual_size += entry.meta.size; + if (type == BIIndexType::Plain) { + return entry.exists && entry.flags == 0; + } else if (type == BIIndexType::Instance) { + return entry.exists; + } + return false; } + void rgw_cls_bi_entry::generate_test_instances(list& o) { using ceph::encode; diff --git a/ceph/src/common/Formatter.cc b/ceph/src/common/Formatter.cc index 362deffb5..f121afa07 100644 --- a/ceph/src/common/Formatter.cc +++ b/ceph/src/common/Formatter.cc @@ -311,6 +311,11 @@ void JSONFormatter::add_value(std::string_view name, std::string_view val, bool } } +void JSONFormatter::dump_null(std::string_view name) +{ + add_value(name, "null"); +} + void JSONFormatter::dump_unsigned(std::string_view name, uint64_t u) { add_value(name, u); @@ -441,14 +446,20 @@ void XMLFormatter::open_array_section_in_ns(std::string_view name, const char *n open_section_in_ns(name, ns, NULL); } +std::string XMLFormatter::get_xml_name(std::string_view name) const +{ + std::string e(name); + std::transform(e.begin(), e.end(), e.begin(), + [this](char c) { return this->to_lower_underscore(c); }); + return e; +} + void XMLFormatter::close_section() { ceph_assert(!m_sections.empty()); finish_pending_string(); - std::string section = m_sections.back(); - std::transform(section.begin(), section.end(), section.begin(), - [this](char c) { return this->to_lower_underscore(c); }); + auto section = get_xml_name(m_sections.back()); m_sections.pop_back(); print_spaces(); m_ss << ""; @@ -459,10 +470,7 @@ void XMLFormatter::close_section() template void XMLFormatter::add_value(std::string_view name, T val) { - std::string e(name); - std::transform(e.begin(), e.end(), e.begin(), - [this](char c) { return this->to_lower_underscore(c); }); - + auto e = get_xml_name(name); print_spaces(); m_ss.precision(std::numeric_limits::max_digits10); m_ss << "<" << e << ">" << val << ""; @@ -470,6 +478,14 @@ void 
XMLFormatter::add_value(std::string_view name, T val) m_ss << "\n"; } +void XMLFormatter::dump_null(std::string_view name) +{ + print_spaces(); + m_ss << "<" << get_xml_name(name) << " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:nil=\"true\" />"; + if (m_pretty) + m_ss << "\n"; +} + void XMLFormatter::dump_unsigned(std::string_view name, uint64_t u) { add_value(name, u); @@ -487,10 +503,7 @@ void XMLFormatter::dump_float(std::string_view name, double d) void XMLFormatter::dump_string(std::string_view name, std::string_view s) { - std::string e(name); - std::transform(e.begin(), e.end(), e.begin(), - [this](char c) { return this->to_lower_underscore(c); }); - + auto e = get_xml_name(name); print_spaces(); m_ss << "<" << e << ">" << xml_stream_escaper(s) << ""; if (m_pretty) @@ -499,10 +512,7 @@ void XMLFormatter::dump_string(std::string_view name, std::string_view s) void XMLFormatter::dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) { - std::string e(name); - std::transform(e.begin(), e.end(), e.begin(), - [this](char c) { return this->to_lower_underscore(c); }); - + auto e = get_xml_name(name); std::string attrs_str; get_attrs_str(&attrs, attrs_str); print_spaces(); @@ -523,9 +533,7 @@ void XMLFormatter::dump_format_va(std::string_view name, const char *ns, bool qu { char buf[LARGE_SIZE]; size_t len = vsnprintf(buf, LARGE_SIZE, fmt, ap); - std::string e(name); - std::transform(e.begin(), e.end(), e.begin(), - [this](char c) { return this->to_lower_underscore(c); }); + auto e = get_xml_name(name); print_spaces(); if (ns) { @@ -577,9 +585,7 @@ void XMLFormatter::open_section_in_ns(std::string_view name, const char *ns, con get_attrs_str(attrs, attrs_str); } - std::string e(name); - std::transform(e.begin(), e.end(), e.begin(), - [this](char c) { return this->to_lower_underscore(c); }); + auto e = get_xml_name(name); if (ns) { m_ss << "<" << e << attrs_str << " xmlns=\"" << ns << "\">"; @@ -852,6 +858,11 @@ void TableFormatter::add_value(std::string_view name, T val) { m_ss.str(""); } +void TableFormatter::dump_null(std::string_view name) +{ + add_value(name, "null"); +} + void TableFormatter::dump_unsigned(std::string_view name, uint64_t u) { add_value(name, u); diff --git a/ceph/src/common/Formatter.h b/ceph/src/common/Formatter.h index abdc172cb..1919b018a 100644 --- a/ceph/src/common/Formatter.h +++ b/ceph/src/common/Formatter.h @@ -87,6 +87,7 @@ namespace ceph { virtual void open_object_section(std::string_view name) = 0; virtual void open_object_section_in_ns(std::string_view name, const char *ns) = 0; virtual void close_section() = 0; + virtual void dump_null(std::string_view name) = 0; virtual void dump_unsigned(std::string_view name, uint64_t u) = 0; virtual void dump_int(std::string_view name, int64_t s) = 0; virtual void dump_float(std::string_view name, double d) = 0; @@ -156,6 +157,7 @@ namespace ceph { void open_object_section(std::string_view name) override; void open_object_section_in_ns(std::string_view name, const char *ns) override; void close_section() override; + void dump_null(std::string_view name) override; void dump_unsigned(std::string_view name, uint64_t u) override; void dump_int(std::string_view name, int64_t s) override; void dump_float(std::string_view name, double d) override; @@ -228,6 +230,7 @@ namespace ceph { void open_object_section(std::string_view name) override; void open_object_section_in_ns(std::string_view name, const char *ns) override; void close_section() override; + void 
dump_null(std::string_view name) override; void dump_unsigned(std::string_view name, uint64_t u) override; void dump_int(std::string_view name, int64_t s) override; void dump_float(std::string_view name, double d) override; @@ -249,6 +252,7 @@ namespace ceph { void print_spaces(); void get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str); char to_lower_underscore(char c) const; + std::string get_xml_name(std::string_view name) const; std::stringstream m_ss, m_pending_string; std::deque m_sections; @@ -283,6 +287,7 @@ namespace ceph { void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override; void close_section() override; + void dump_null(std::string_view name) override; void dump_unsigned(std::string_view name, uint64_t u) override; void dump_int(std::string_view name, int64_t s) override; void dump_float(std::string_view name, double d) override; diff --git a/ceph/src/common/TrackedOp.cc b/ceph/src/common/TrackedOp.cc index d63bdb8f9..32a1ab472 100644 --- a/ceph/src/common/TrackedOp.cc +++ b/ceph/src/common/TrackedOp.cc @@ -134,7 +134,7 @@ void OpHistory::dump_ops(utime_t now, Formatter *f, set filters, bool by if (!i->second->filter_out(filters)) continue; f->open_object_section("op"); - i->second->dump(now, f); + i->second->dump(now, f, OpTracker::default_dumper); f->close_section(); } }; @@ -214,7 +214,7 @@ void OpHistory::dump_slow_ops(utime_t now, Formatter *f, set filters) if (!i->second->filter_out(filters)) continue; f->open_object_section("Op"); - i->second->dump(now, f); + i->second->dump(now, f, OpTracker::default_dumper); f->close_section(); } f->close_section(); @@ -233,7 +233,7 @@ bool OpTracker::dump_historic_slow_ops(Formatter *f, set filters) return true; } -bool OpTracker::dump_ops_in_flight(Formatter *f, bool print_only_blocked, set filters, bool count_only) +bool OpTracker::dump_ops_in_flight(Formatter *f, bool print_only_blocked, set filters, bool count_only, dumper lambda) { if (!tracking_enabled) return false; @@ -259,7 +259,7 @@ bool OpTracker::dump_ops_in_flight(Formatter *f, bool print_only_blocked, setopen_object_section("op"); - op.dump(now, f); + op.dump(now, f, lambda); f->close_section(); // this TrackedOp } @@ -496,7 +496,7 @@ void TrackedOp::mark_event(std::string_view event, utime_t stamp) _event_marked(); } -void TrackedOp::dump(utime_t now, Formatter *f) const +void TrackedOp::dump(utime_t now, Formatter *f, OpTracker::dumper lambda) const { // Ignore if still in the constructor if (!state) @@ -507,7 +507,7 @@ void TrackedOp::dump(utime_t now, Formatter *f) const f->dump_float("duration", get_duration()); { f->open_object_section("type_data"); - _dump(f); + lambda(*this, f); f->close_section(); } } diff --git a/ceph/src/common/TrackedOp.h b/ceph/src/common/TrackedOp.h index 0ff7430b3..477f6c959 100644 --- a/ceph/src/common/TrackedOp.h +++ b/ceph/src/common/TrackedOp.h @@ -15,6 +15,7 @@ #define TRACKEDREQUEST_H_ #include +#include "common/StackStringStream.h" #include "common/ceph_mutex.h" #include "common/histogram.h" #include "common/Thread.h" @@ -129,6 +130,8 @@ class OpTracker { ceph::shared_mutex lock = ceph::make_shared_mutex("OpTracker::lock"); public: + using dumper = std::function; + CephContext *cct; OpTracker(CephContext *cct_, bool tracking, uint32_t num_shards); @@ -148,7 +151,8 @@ public: void set_tracking(bool enable) { tracking_enabled = enable; } - bool dump_ops_in_flight(ceph::Formatter *f, bool print_only_blocked = false, std::set filters = {""}, bool count_only = false); + static 
void default_dumper(const TrackedOp& op, Formatter* f); + bool dump_ops_in_flight(ceph::Formatter *f, bool print_only_blocked = false, std::set filters = {""}, bool count_only = false, dumper lambda = default_dumper); bool dump_historic_ops(ceph::Formatter *f, bool by_duration = false, std::set filters = {""}); bool dump_historic_slow_ops(ceph::Formatter *f, std::set filters = {""}); bool register_inflight_op(TrackedOp *i); @@ -278,10 +282,6 @@ protected: }; std::atomic state = {STATE_UNTRACKED}; - mutable std::string desc_str; ///< protected by lock - mutable const char *desc = nullptr; ///< readable without lock - mutable std::atomic want_new_desc = {false}; - TrackedOp(OpTracker *_tracker, const utime_t& initiated) : tracker(_tracker), initiated_at(initiated) @@ -294,7 +294,7 @@ protected: /// if you want something else to happen when events are marked, implement virtual void _event_marked() {} /// return a unique descriptor of the Op; eg the message it's attached to - virtual void _dump_op_descriptor_unlocked(std::ostream& stream) const = 0; + virtual void _dump_op_descriptor(std::ostream& stream) const = 0; /// called when the last non-OpTracker reference is dropped virtual void _unregistered() {} @@ -346,26 +346,41 @@ public: } } - const char *get_desc() const { - if (!desc || want_new_desc.load()) { - std::lock_guard l(lock); - _gen_desc(); + std::string get_desc() const { + std::string ret; + { + std::lock_guard l(desc_lock); + ret = desc; + } + if (ret.size() == 0 || want_new_desc.load()) { + CachedStackStringStream css; + std::scoped_lock l(lock, desc_lock); + if (desc.size() && !want_new_desc.load()) { + return desc; + } + _dump_op_descriptor(*css); + desc = css->strv(); + want_new_desc = false; + return desc; + } else { + return ret; } - return desc; } + private: - void _gen_desc() const { - std::ostringstream ss; - _dump_op_descriptor_unlocked(ss); - desc_str = ss.str(); - desc = desc_str.c_str(); - want_new_desc = false; - } + mutable ceph::mutex desc_lock = ceph::make_mutex("OpTracker::desc_lock"); + mutable std::string desc; ///< protected by desc_lock + mutable std::atomic want_new_desc = {false}; + public: void reset_desc() { want_new_desc = true; } + void dump_type(Formatter* f) const { + return _dump(f); + } + const utime_t& get_initiated() const { return initiated_at; } @@ -384,12 +399,12 @@ public: warn_interval_multiplier = 0; } - virtual std::string_view state_string() const { + std::string state_string() const { std::lock_guard l(lock); - return events.empty() ? std::string_view() : std::string_view(events.rbegin()->str); + return _get_state_string(); } - void dump(utime_t now, ceph::Formatter *f) const; + void dump(utime_t now, ceph::Formatter *f, OpTracker::dumper lambda) const; void tracking_start() { if (tracker->register_inflight_op(this)) { @@ -406,7 +421,15 @@ public: friend void intrusive_ptr_release(TrackedOp *o) { o->put(); } + +protected: + virtual std::string _get_state_string() const { + return events.empty() ? 
std::string() : std::string(events.rbegin()->str); + } }; +inline void OpTracker::default_dumper(const TrackedOp& op, Formatter* f) { + op._dump(f); +} #endif diff --git a/ceph/src/common/ceph_strings.cc b/ceph/src/common/ceph_strings.cc index ca044cc85..18dcc701b 100644 --- a/ceph/src/common/ceph_strings.cc +++ b/ceph/src/common/ceph_strings.cc @@ -300,6 +300,7 @@ const char *ceph_mds_op_name(int op) case CEPH_MDS_OP_MKSNAP: return "mksnap"; case CEPH_MDS_OP_RMSNAP: return "rmsnap"; case CEPH_MDS_OP_RENAMESNAP: return "renamesnap"; + case CEPH_MDS_OP_READDIR_SNAPDIFF: return "readdir_snapdiff"; case CEPH_MDS_OP_SETFILELOCK: return "setfilelock"; case CEPH_MDS_OP_GETFILELOCK: return "getfilelock"; case CEPH_MDS_OP_FRAGMENTDIR: return "fragmentdir"; diff --git a/ceph/src/common/options/mds.yaml.in b/ceph/src/common/options/mds.yaml.in index eeb895615..6eb0702fc 100644 --- a/ceph/src/common/options/mds.yaml.in +++ b/ceph/src/common/options/mds.yaml.in @@ -345,10 +345,10 @@ options: type: float level: advanced desc: decay rate for session readdir caps leading to readdir throttle - long_desc: The half-life for the session cap acquisition counter of caps acquired - by readdir. This is used for throttling readdir requests from clients slow to - release caps. - default: 10 + long_desc: The half-life for the session cap acquisition counter of caps + acquired by readdir. This is used for throttling readdir requests from + clients. + default: 30 services: - mds flags: @@ -356,8 +356,8 @@ options: - name: mds_session_cap_acquisition_throttle type: uint level: advanced - desc: throttle point for cap acquisition decay counter - default: 500000 + desc: threshold at which the cap acquisition decay counter throttles + default: 100000 services: - mds - name: mds_session_max_caps_throttle_ratio @@ -1524,3 +1524,13 @@ options: - mds flags: - runtime +- name: mds_session_metadata_threshold + type: size + level: advanced + desc: Evict non-advancing client-tid sessions exceeding the config size. + long_desc: Evict clients which are not advancing their request tids which causes a large buildup of session metadata (`completed_requests`) in the MDS causing the MDS to go read-only since the RADOS operation exceeds the size threashold. This config is the maximum size (in bytes) that a session metadata (encoded) can grow. + default: 16_M + services: + - mds + flags: + - runtime diff --git a/ceph/src/common/options/rgw.yaml.in b/ceph/src/common/options/rgw.yaml.in index 48b58ca65..241632a22 100644 --- a/ceph/src/common/options/rgw.yaml.in +++ b/ceph/src/common/options/rgw.yaml.in @@ -3241,7 +3241,7 @@ options: is very heavily loaded. Beware that increasing this value may cause some operations to take longer in exceptional cases and thus may, rarely, cause clients to time out. 
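The two MDS throttle options above are easier to follow with a concrete picture of the mechanism they tune: each client session keeps a half-life based decay counter of caps acquired through readdir, and further readdir requests are throttled once that counter exceeds mds_session_cap_acquisition_throttle. The following is a minimal, self-contained sketch of such a counter using the new defaults (30 s half-life, 100000 threshold); the DecayCounter class and the numbers fed into it are illustrative assumptions only, not the actual MDS implementation.

    #include <cmath>
    #include <initializer_list>
    #include <iostream>

    // Illustrative sketch, not the MDS code: a half-life based decay counter
    // of the kind mds_session_cap_acquisition_decay_rate (half-life) and
    // mds_session_cap_acquisition_throttle (threshold) tune.
    class DecayCounter {
    public:
      explicit DecayCounter(double half_life_secs) : half_life(half_life_secs) {}

      // record `amount` cap acquisitions at time `now` (seconds)
      void hit(double amount, double now) { decay(now); value += amount; }

      // current decayed value at time `now`
      double get(double now) { decay(now); return value; }

    private:
      void decay(double now) {
        double elapsed = now - last;
        // exponential decay: the counter halves every `half_life` seconds
        value *= std::exp2(-elapsed / half_life);
        last = now;
      }

      double half_life;
      double value = 0.0;
      double last = 0.0;
    };

    int main() {
      DecayCounter caps_acquired(30.0);   // assumed half-life, per the new default
      const double throttle = 100000;     // assumed threshold, per the new default

      caps_acquired.hit(150000, 0.0);     // hypothetical readdir burst at t=0
      for (double t : {0.0, 30.0, 60.0}) {
        std::cout << "t=" << t << "s counter=" << caps_acquired.get(t)
                  << " throttled=" << std::boolalpha
                  << (caps_acquired.get(t) > throttle) << "\n";
      }
    }

With these numbers the session is throttled right after the burst, drops to roughly 75000 after one half-life (30 s) and is no longer throttled, which is the behaviour the lowered threshold and longer half-life are meant to shape.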
- default: 3 + default: 10 tags: - error recovery services: diff --git a/ceph/src/crimson/admin/admin_socket.cc b/ceph/src/crimson/admin/admin_socket.cc index 88ce8b1b7..9db91369a 100644 --- a/ceph/src/crimson/admin/admin_socket.cc +++ b/ceph/src/crimson/admin/admin_socket.cc @@ -236,6 +236,14 @@ seastar::future<> AdminSocket::start(const std::string& path) try { server_sock = seastar::engine().listen(sock_path); } catch (const std::system_error& e) { + if (e.code() == std::errc::address_in_use) { + logger().debug("{}: Admin Socket socket path={} already exists, retrying", + __func__, path); + return seastar::remove_file(path).then([this, path] { + server_sock.reset(); + return start(path); + }); + } logger().error("{}: unable to listen({}): {}", __func__, path, e.what()); server_sock.reset(); return seastar::make_ready_future<>(); diff --git a/ceph/src/crimson/common/config_proxy.h b/ceph/src/crimson/common/config_proxy.h index e27db9b5b..4c0e65507 100644 --- a/ceph/src/crimson/common/config_proxy.h +++ b/ceph/src/crimson/common/config_proxy.h @@ -96,8 +96,17 @@ public: return values.get(); } - // required by sharded<> + void get_config_bl(uint64_t have_version, + ceph::buffer::list *bl, + uint64_t *got_version) { + get_config().get_config_bl(get_config_values(), have_version, + bl, got_version); + } + void get_defaults_bl(ceph::buffer::list *bl) { + get_config().get_defaults_bl(get_config_values(), bl); + } seastar::future<> start(); + // required by sharded<> seastar::future<> stop() { return seastar::make_ready_future<>(); } diff --git a/ceph/src/crimson/common/errorator.h b/ceph/src/crimson/common/errorator.h index 705a9f052..c5d63d5b9 100644 --- a/ceph/src/crimson/common/errorator.h +++ b/ceph/src/crimson/common/errorator.h @@ -599,7 +599,9 @@ private: static_assert((... && std::is_invocable_v), "provided Error Visitor is not exhaustive"); - + static_assert(std::is_void_v ? std::is_invocable_v + : std::is_invocable_v, + "Value Func is not invocable with future's value"); using value_func_result_t = typename std::conditional_t, std::invoke_result, diff --git a/ceph/src/crimson/common/operation.h b/ceph/src/crimson/common/operation.h index f26a3e860..6df2c99fd 100644 --- a/ceph/src/crimson/common/operation.h +++ b/ceph/src/crimson/common/operation.h @@ -476,7 +476,7 @@ public: using Ref = std::unique_ptr; /// Waits for exit barrier - virtual seastar::future<> wait() = 0; + virtual std::optional> wait() = 0; /// Releases pipeline stage, can only be called after wait virtual void exit() = 0; @@ -503,8 +503,8 @@ public: class PipelineHandle { PipelineExitBarrierI::Ref barrier; - auto wait_barrier() { - return barrier ? barrier->wait() : seastar::now(); + std::optional> wait_barrier() { + return barrier ? 
barrier->wait() : std::nullopt; } public: @@ -525,15 +525,26 @@ public: seastar::future<> enter(T &stage, typename T::BlockingEvent::template Trigger&& t) { ceph_assert(stage.get_core() == seastar::this_shard_id()); - return wait_barrier().then([this, &stage, t=std::move(t)] () mutable { - auto fut = t.maybe_record_blocking(stage.enter(t), stage); - exit(); - return std::move(fut).then( - [this, t=std::move(t)](auto &&barrier_ref) mutable { - barrier = std::move(barrier_ref); - return seastar::now(); + auto wait_fut = wait_barrier(); + if (wait_fut.has_value()) { + return wait_fut.value().then([this, &stage, t=std::move(t)] () mutable { + auto fut = t.maybe_record_blocking(stage.enter(t), stage); + exit(); + return std::move(fut).then( + [this, t=std::move(t)](auto &&barrier_ref) mutable { + barrier = std::move(barrier_ref); + return seastar::now(); + }); }); - }); + } else { + auto fut = t.maybe_record_blocking(stage.enter(t), stage); + exit(); + return std::move(fut).then( + [this, t=std::move(t)](auto &&barrier_ref) mutable { + barrier = std::move(barrier_ref); + return seastar::now(); + }); + } } /** @@ -542,7 +553,7 @@ public: seastar::future<> complete() { auto ret = wait_barrier(); barrier.reset(); - return ret; + return ret ? std::move(ret.value()) : seastar::now(); } /** @@ -578,8 +589,8 @@ class OrderedExclusivePhaseT : public PipelineStageIT { ExitBarrier(OrderedExclusivePhaseT *phase, Operation::id_t id) : phase(phase), op_id(id) {} - seastar::future<> wait() final { - return seastar::now(); + std::optional> wait() final { + return std::nullopt; } void exit() final { @@ -681,7 +692,7 @@ private: seastar::future<> &&barrier, TriggerT& trigger) : phase(phase), barrier(std::move(barrier)), trigger(trigger) {} - seastar::future<> wait() final { + std::optional> wait() final { assert(phase); assert(barrier); auto ret = std::move(*barrier); @@ -739,8 +750,8 @@ class UnorderedStageT : public PipelineStageIT { public: ExitBarrier() = default; - seastar::future<> wait() final { - return seastar::now(); + std::optional> wait() final { + return std::nullopt; } void exit() final {} diff --git a/ceph/src/crimson/mgr/client.cc b/ceph/src/crimson/mgr/client.cc index 6e3d7cdd8..169915c9e 100644 --- a/ceph/src/crimson/mgr/client.cc +++ b/ceph/src/crimson/mgr/client.cc @@ -65,13 +65,18 @@ Client::ms_dispatch(crimson::net::ConnectionRef conn, MessageRef m) return (dispatched ? 
std::make_optional(seastar::now()) : std::nullopt); } -void Client::ms_handle_connect(crimson::net::ConnectionRef c) +void Client::ms_handle_connect( + crimson::net::ConnectionRef c, + seastar::shard_id prv_shard) { + ceph_assert_always(prv_shard == seastar::this_shard_id()); gate.dispatch_in_background(__func__, *this, [this, c] { if (conn == c) { // ask for the mgrconfigure message auto m = crimson::make_message(); m->daemon_name = local_conf()->name.get_id(); + local_conf().get_config_bl(0, &m->config_bl, &last_config_bl_version); + local_conf().get_defaults_bl(&m->config_defaults_bl); return conn->send(std::move(m)); } else { return seastar::now(); diff --git a/ceph/src/crimson/mgr/client.h b/ceph/src/crimson/mgr/client.h index e84575433..501949768 100644 --- a/ceph/src/crimson/mgr/client.h +++ b/ceph/src/crimson/mgr/client.h @@ -40,7 +40,7 @@ private: std::optional> ms_dispatch( crimson::net::ConnectionRef conn, Ref m) override; void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) final; - void ms_handle_connect(crimson::net::ConnectionRef conn) final; + void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id) final; seastar::future<> handle_mgr_map(crimson::net::ConnectionRef conn, Ref m); seastar::future<> handle_mgr_conf(crimson::net::ConnectionRef conn, @@ -56,6 +56,7 @@ private: crimson::net::ConnectionRef conn; seastar::timer report_timer; crimson::common::Gated gate; + uint64_t last_config_bl_version = 0; }; inline std::ostream& operator<<(std::ostream& out, const Client& client) { diff --git a/ceph/src/crimson/net/Connection.h b/ceph/src/crimson/net/Connection.h index 4c90f6e68..7141e20f4 100644 --- a/ceph/src/crimson/net/Connection.h +++ b/ceph/src/crimson/net/Connection.h @@ -40,6 +40,15 @@ class Connection : public seastar::enable_shared_from_this { virtual ~Connection() {} + /** + * get_shard_id + * + * The shard id where the Connection is dispatching events and handling I/O. + * + * May be changed with the accept/connect events. + */ + virtual const seastar::shard_id get_shard_id() const = 0; + virtual const entity_name_t &get_peer_name() const = 0; entity_type_t get_peer_type() const { return get_peer_name().type(); } @@ -71,7 +80,9 @@ class Connection : public seastar::enable_shared_from_this { * send * * Send a message over a connection that has completed its handshake. - * May be invoked from any core. + * + * May be invoked from any core, but that requires to chain the returned + * future to preserve ordering. */ virtual seastar::future<> send(MessageURef msg) = 0; @@ -81,7 +92,8 @@ class Connection : public seastar::enable_shared_from_this { * Send a keepalive message over a connection that has completed its * handshake. * - * May be invoked from any core. + * May be invoked from any core, but that requires to chain the returned + * future to preserve ordering. 
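The ordering caveat added to send() and send_keepalive() above can be illustrated with a small usage sketch: a caller preserves message ordering by chaining the futures returned by send(), so the second message is only submitted once the first send's future has resolved. The helper below is a hypothetical fragment for illustration only (it assumes the crimson::net headers, the unqualified MessageURef type used by the Connection::send() signature above, and a live ConnectionRef); it is not part of this patch.

    #include <utility>
    #include <seastar/core/future.hh>
    #include "crimson/net/Connection.h"
    #include "crimson/net/Fwd.h"

    // Hypothetical helper: send two messages while preserving their ordering
    // by chaining the futures returned by Connection::send().
    seastar::future<> send_two_in_order(crimson::net::ConnectionRef conn,
                                        MessageURef m1, MessageURef m2) {
      return conn->send(std::move(m1)).then(
        [conn, m2 = std::move(m2)]() mutable {
          // the second message is only queued after the first send resolved
          return conn->send(std::move(m2));
        });
    }

Without the chaining, two independent send() calls issued from another core could be reordered, which is exactly what the updated interface comment warns about.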
*/ virtual seastar::future<> send_keepalive() = 0; @@ -109,9 +121,13 @@ class Connection : public seastar::enable_shared_from_this { virtual void print(std::ostream& out) const = 0; #ifdef UNIT_TESTS_BUILT - virtual bool is_closed() const = 0; + virtual bool is_protocol_ready() const = 0; + + virtual bool is_protocol_standby() const = 0; + + virtual bool is_protocol_closed() const = 0; - virtual bool is_closed_clean() const = 0; + virtual bool is_protocol_closed_clean() const = 0; virtual bool peer_wins() const = 0; #endif diff --git a/ceph/src/crimson/net/Dispatcher.h b/ceph/src/crimson/net/Dispatcher.h index cc6fd4574..9eea0a858 100644 --- a/ceph/src/crimson/net/Dispatcher.h +++ b/ceph/src/crimson/net/Dispatcher.h @@ -30,11 +30,27 @@ class Dispatcher { // used to throttle the connection if it's too busy. virtual std::optional> ms_dispatch(ConnectionRef, MessageRef) = 0; - virtual void ms_handle_accept(ConnectionRef conn) {} + // The connection is moving to the new_shard under accept/connect. + // User should not operate conn in this shard thereafter. + virtual void ms_handle_shard_change( + ConnectionRef conn, + seastar::shard_id new_shard, + bool is_accept_or_connect) {} + + // The connection is accepted or recoverred(lossless), all the followup + // events and messages will be dispatched to this shard. + // + // is_replace=true means the accepted connection has replaced + // another connecting connection with the same peer_addr, which currently only + // happens under lossy policy when both sides wish to connect to each other. + virtual void ms_handle_accept(ConnectionRef conn, seastar::shard_id prv_shard, bool is_replace) {} - virtual void ms_handle_connect(ConnectionRef conn) {} + // The connection is (re)connected, all the followup events and messages will + // be dispatched to this shard. + virtual void ms_handle_connect(ConnectionRef conn, seastar::shard_id prv_shard) {} // a reset event is dispatched when the connection is closed unexpectedly. + // // is_replace=true means the reset connection is going to be replaced by // another accepting connection with the same peer_addr, which currently only // happens under lossy policy when both sides wish to connect to each other. diff --git a/ceph/src/crimson/net/FrameAssemblerV2.cc b/ceph/src/crimson/net/FrameAssemblerV2.cc index 1b6d5a044..273a6350d 100644 --- a/ceph/src/crimson/net/FrameAssemblerV2.cc +++ b/ceph/src/crimson/net/FrameAssemblerV2.cc @@ -6,10 +6,6 @@ #include "Errors.h" #include "SocketConnection.h" -#ifdef UNIT_TESTS_BUILT -#include "Interceptor.h" -#endif - using ceph::msgr::v2::FrameAssembler; using ceph::msgr::v2::FrameError; using ceph::msgr::v2::preamble_block_t; @@ -27,25 +23,45 @@ seastar::logger& logger() { namespace crimson::net { FrameAssemblerV2::FrameAssemblerV2(SocketConnection &_conn) - : conn{_conn} -{} + : conn{_conn}, sid{seastar::this_shard_id()} +{ + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); +} + +FrameAssemblerV2::~FrameAssemblerV2() +{ + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); + assert(seastar::this_shard_id() == sid); + if (has_socket()) { + std::ignore = move_socket(); + } +} #ifdef UNIT_TESTS_BUILT // should be consistent to intercept() in ProtocolV2.cc -void FrameAssemblerV2::intercept_frame(Tag tag, bool is_write) +seastar::future<> FrameAssemblerV2::intercept_frames( + std::vector bps, + bp_type_t type) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - if (conn.interceptor) { - auto type = is_write ? 
bp_type_t::WRITE : bp_type_t::READ; - auto action = conn.interceptor->intercept( - conn, Breakpoint{tag, type}); - socket->set_trap(type, action, &conn.interceptor->blocker); + if (!conn.interceptor) { + return seastar::now(); } + return conn.interceptor->intercept(conn, bps + ).then([this, type](bp_action_t action) { + return seastar::smp::submit_to( + socket->get_shard_id(), + [this, type, action] { + socket->set_trap(type, action, &conn.interceptor->blocker); + }); + }); } #endif void FrameAssemblerV2::set_is_rev1(bool _is_rev1) { + assert(seastar::this_shard_id() == sid); is_rev1 = _is_rev1; tx_frame_asm.set_is_rev1(_is_rev1); rx_frame_asm.set_is_rev1(_is_rev1); @@ -55,12 +71,14 @@ void FrameAssemblerV2::create_session_stream_handlers( const AuthConnectionMeta &auth_meta, bool crossed) { + assert(seastar::this_shard_id() == sid); session_stream_handlers = ceph::crypto::onwire::rxtx_t::create_handler_pair( nullptr, auth_meta, is_rev1, crossed); } void FrameAssemblerV2::reset_handlers() { + assert(seastar::this_shard_id() == sid); session_stream_handlers = { nullptr, nullptr }; session_comp_handlers = { nullptr, nullptr }; } @@ -68,19 +86,23 @@ void FrameAssemblerV2::reset_handlers() FrameAssemblerV2::mover_t FrameAssemblerV2::to_replace() { + assert(seastar::this_shard_id() == sid); assert(is_socket_valid()); - socket = nullptr; + + clear(); + return mover_t{ - std::move(conn.socket), + move_socket(), std::move(session_stream_handlers), std::move(session_comp_handlers)}; } seastar::future<> FrameAssemblerV2::replace_by(FrameAssemblerV2::mover_t &&mover) { - record_io = false; - rxbuf.clear(); - txbuf.clear(); + assert(seastar::this_shard_id() == sid); + + clear(); + session_stream_handlers = std::move(mover.session_stream_handlers); session_comp_handlers = std::move(mover.session_comp_handlers); if (has_socket()) { @@ -93,6 +115,7 @@ seastar::future<> FrameAssemblerV2::replace_by(FrameAssemblerV2::mover_t &&mover void FrameAssemblerV2::start_recording() { + assert(seastar::this_shard_id() == sid); record_io = true; rxbuf.clear(); txbuf.clear(); @@ -101,6 +124,7 @@ void FrameAssemblerV2::start_recording() FrameAssemblerV2::record_bufs_t FrameAssemblerV2::stop_recording() { + assert(seastar::this_shard_id() == sid); ceph_assert_always(record_io == true); record_io = false; return record_bufs_t{std::move(rxbuf), std::move(txbuf)}; @@ -109,132 +133,256 @@ FrameAssemblerV2::stop_recording() bool FrameAssemblerV2::has_socket() const { assert((socket && conn.socket) || (!socket && !conn.socket)); - return socket != nullptr; + return bool(socket); } bool FrameAssemblerV2::is_socket_valid() const { - return has_socket() && !socket->is_shutdown(); + assert(seastar::this_shard_id() == sid); +#ifndef NDEBUG + if (has_socket() && socket->get_shard_id() == sid) { + assert(socket->is_shutdown() == is_socket_shutdown); + } +#endif + return has_socket() && !is_socket_shutdown; +} + +seastar::shard_id +FrameAssemblerV2::get_socket_shard_id() const +{ + assert(seastar::this_shard_id() == sid); + assert(is_socket_valid()); + return socket->get_shard_id(); +} + +SocketFRef FrameAssemblerV2::move_socket() +{ + assert(has_socket()); + conn.set_socket(nullptr); + return std::move(socket); } -void FrameAssemblerV2::set_socket(SocketRef &&new_socket) +void FrameAssemblerV2::set_socket(SocketFRef &&new_socket) { + assert(seastar::this_shard_id() == sid); assert(!has_socket()); - socket = new_socket.get(); - conn.socket = std::move(new_socket); + assert(new_socket); + socket = std::move(new_socket); + 
conn.set_socket(socket.get()); + is_socket_shutdown = false; assert(is_socket_valid()); } void FrameAssemblerV2::learn_socket_ephemeral_port_as_connector(uint16_t port) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); + // Note: may not invoke on the socket core socket->learn_ephemeral_port_as_connector(port); } -void FrameAssemblerV2::shutdown_socket() +template +void FrameAssemblerV2::shutdown_socket(crimson::common::Gated *gate) { + assert(seastar::this_shard_id() == sid); assert(is_socket_valid()); - socket->shutdown(); + is_socket_shutdown = true; + if constexpr (may_cross_core) { + assert(conn.get_messenger_shard_id() == sid); + assert(gate); + gate->dispatch_in_background("shutdown_socket", conn, [this] { + return seastar::smp::submit_to( + socket->get_shard_id(), [this] { + socket->shutdown(); + }); + }); + } else { + assert(socket->get_shard_id() == sid); + assert(!gate); + socket->shutdown(); + } } +template void FrameAssemblerV2::shutdown_socket(crimson::common::Gated *); +template void FrameAssemblerV2::shutdown_socket(crimson::common::Gated *); -seastar::future<> FrameAssemblerV2::replace_shutdown_socket(SocketRef &&new_socket) +seastar::future<> FrameAssemblerV2::replace_shutdown_socket(SocketFRef &&new_socket) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - assert(socket->is_shutdown()); - socket = nullptr; - auto old_socket = std::move(conn.socket); + assert(!is_socket_valid()); + auto old_socket = move_socket(); + auto old_socket_shard_id = old_socket->get_shard_id(); set_socket(std::move(new_socket)); - return old_socket->close( - ).then([sock = std::move(old_socket)] {}); + return seastar::smp::submit_to( + old_socket_shard_id, + [old_socket = std::move(old_socket)]() mutable { + return old_socket->close( + ).then([sock = std::move(old_socket)] {}); + }); } seastar::future<> FrameAssemblerV2::close_shutdown_socket() { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - assert(socket->is_shutdown()); - return socket->close(); + assert(!is_socket_valid()); + return seastar::smp::submit_to( + socket->get_shard_id(), [this] { + return socket->close(); + }); } -seastar::future +template +seastar::future FrameAssemblerV2::read_exactly(std::size_t bytes) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - if (unlikely(record_io)) { - return socket->read_exactly(bytes - ).then([this](auto bl) { - rxbuf.append(buffer::create(bl.share())); - return bl; + if constexpr (may_cross_core) { + assert(conn.get_messenger_shard_id() == sid); + return seastar::smp::submit_to( + socket->get_shard_id(), [this, bytes] { + return socket->read_exactly(bytes); + }).then([this](auto bptr) { + if (record_io) { + rxbuf.append(bptr); + } + return bptr; }); } else { + assert(socket->get_shard_id() == sid); return socket->read_exactly(bytes); - }; + } } +template seastar::future FrameAssemblerV2::read_exactly(std::size_t); +template seastar::future FrameAssemblerV2::read_exactly(std::size_t); +template seastar::future FrameAssemblerV2::read(std::size_t bytes) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - if (unlikely(record_io)) { - return socket->read(bytes - ).then([this](auto buf) { - rxbuf.append(buf); + if constexpr (may_cross_core) { + assert(conn.get_messenger_shard_id() == sid); + return seastar::smp::submit_to( + socket->get_shard_id(), [this, bytes] { + return socket->read(bytes); + }).then([this](auto buf) { + if (record_io) { + rxbuf.append(buf); + } return buf; }); } else { + 
assert(socket->get_shard_id() == sid); return socket->read(bytes); } } +template seastar::future FrameAssemblerV2::read(std::size_t); +template seastar::future FrameAssemblerV2::read(std::size_t); +template seastar::future<> -FrameAssemblerV2::write(ceph::bufferlist &&buf) +FrameAssemblerV2::write(ceph::bufferlist buf) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - if (unlikely(record_io)) { - txbuf.append(buf); + if constexpr (may_cross_core) { + assert(conn.get_messenger_shard_id() == sid); + if (record_io) { + txbuf.append(buf); + } + return seastar::smp::submit_to( + socket->get_shard_id(), [this, buf = std::move(buf)]() mutable { + return socket->write(std::move(buf)); + }); + } else { + assert(socket->get_shard_id() == sid); + return socket->write(std::move(buf)); } - return socket->write(std::move(buf)); } +template seastar::future<> FrameAssemblerV2::write(ceph::bufferlist); +template seastar::future<> FrameAssemblerV2::write(ceph::bufferlist); +template seastar::future<> FrameAssemblerV2::flush() { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - return socket->flush(); + if constexpr (may_cross_core) { + assert(conn.get_messenger_shard_id() == sid); + return seastar::smp::submit_to( + socket->get_shard_id(), [this] { + return socket->flush(); + }); + } else { + assert(socket->get_shard_id() == sid); + return socket->flush(); + } } +template seastar::future<> FrameAssemblerV2::flush(); +template seastar::future<> FrameAssemblerV2::flush(); +template seastar::future<> -FrameAssemblerV2::write_flush(ceph::bufferlist &&buf) +FrameAssemblerV2::write_flush(ceph::bufferlist buf) { + assert(seastar::this_shard_id() == sid); assert(has_socket()); - if (unlikely(record_io)) { - txbuf.append(buf); + if constexpr (may_cross_core) { + assert(conn.get_messenger_shard_id() == sid); + if (unlikely(record_io)) { + txbuf.append(buf); + } + return seastar::smp::submit_to( + socket->get_shard_id(), [this, buf = std::move(buf)]() mutable { + return socket->write_flush(std::move(buf)); + }); + } else { + assert(socket->get_shard_id() == sid); + return socket->write_flush(std::move(buf)); } - return socket->write_flush(std::move(buf)); } +template seastar::future<> FrameAssemblerV2::write_flush(ceph::bufferlist); +template seastar::future<> FrameAssemblerV2::write_flush(ceph::bufferlist); +template seastar::future FrameAssemblerV2::read_main_preamble() { + assert(seastar::this_shard_id() == sid); rx_preamble.clear(); - return read_exactly(rx_frame_asm.get_preamble_onwire_len() - ).then([this](auto bl) { + return read_exactly( + rx_frame_asm.get_preamble_onwire_len() + ).then([this](auto bptr) { + rx_preamble.append(std::move(bptr)); + Tag tag; try { - rx_preamble.append(buffer::create(std::move(bl))); - const Tag tag = rx_frame_asm.disassemble_preamble(rx_preamble); -#ifdef UNIT_TESTS_BUILT - intercept_frame(tag, false); -#endif - return read_main_t{tag, &rx_frame_asm}; + tag = rx_frame_asm.disassemble_preamble(rx_preamble); } catch (FrameError& e) { logger().warn("{} read_main_preamble: {}", conn, e.what()); throw std::system_error(make_error_code(crimson::net::error::negotiation_failure)); } +#ifdef UNIT_TESTS_BUILT + return intercept_frame(tag, false + ).then([this, tag] { + return read_main_t{tag, &rx_frame_asm}; + }); +#else + return read_main_t{tag, &rx_frame_asm}; +#endif }); } +template seastar::future FrameAssemblerV2::read_main_preamble(); +template seastar::future FrameAssemblerV2::read_main_preamble(); +template seastar::future 
FrameAssemblerV2::read_frame_payload() { + assert(seastar::this_shard_id() == sid); rx_segments_data.clear(); return seastar::do_until( [this] { @@ -250,23 +398,23 @@ FrameAssemblerV2::read_frame_payload() } uint32_t onwire_len = rx_frame_asm.get_segment_onwire_len(seg_idx); // TODO: create aligned and contiguous buffer from socket - return read_exactly(onwire_len - ).then([this](auto tmp_bl) { + return read_exactly(onwire_len + ).then([this](auto bptr) { logger().trace("{} RECV({}) frame segment[{}]", - conn, tmp_bl.size(), rx_segments_data.size()); + conn, bptr.length(), rx_segments_data.size()); bufferlist segment; - segment.append(buffer::create(std::move(tmp_bl))); + segment.append(std::move(bptr)); rx_segments_data.emplace_back(std::move(segment)); }); } ).then([this] { - return read_exactly(rx_frame_asm.get_epilogue_onwire_len()); - }).then([this](auto bl) { - logger().trace("{} RECV({}) frame epilogue", conn, bl.size()); + return read_exactly(rx_frame_asm.get_epilogue_onwire_len()); + }).then([this](auto bptr) { + logger().trace("{} RECV({}) frame epilogue", conn, bptr.length()); bool ok = false; try { bufferlist rx_epilogue; - rx_epilogue.append(buffer::create(std::move(bl))); + rx_epilogue.append(std::move(bptr)); ok = rx_frame_asm.disassemble_segments(rx_preamble, rx_segments_data.data(), rx_epilogue); } catch (FrameError& e) { logger().error("read_frame_payload: {} {}", conn, e.what()); @@ -284,6 +432,8 @@ FrameAssemblerV2::read_frame_payload() return &rx_segments_data; }); } +template seastar::future FrameAssemblerV2::read_frame_payload(); +template seastar::future FrameAssemblerV2::read_frame_payload(); void FrameAssemblerV2::log_main_preamble(const ceph::bufferlist &bl) { @@ -299,4 +449,13 @@ FrameAssemblerV2Ref FrameAssemblerV2::create(SocketConnection &conn) return std::make_unique(conn); } +void FrameAssemblerV2::clear() +{ + record_io = false; + rxbuf.clear(); + txbuf.clear(); + rx_preamble.clear(); + rx_segments_data.clear(); +} + } // namespace crimson::net diff --git a/ceph/src/crimson/net/FrameAssemblerV2.h b/ceph/src/crimson/net/FrameAssemblerV2.h index 06c5cb25e..9c89c144e 100644 --- a/ceph/src/crimson/net/FrameAssemblerV2.h +++ b/ceph/src/crimson/net/FrameAssemblerV2.h @@ -7,8 +7,13 @@ #include "msg/async/crypto_onwire.h" #include "msg/async/compression_onwire.h" +#include "crimson/common/gated.h" #include "crimson/net/Socket.h" +#ifdef UNIT_TESTS_BUILT +#include "Interceptor.h" +#endif + namespace crimson::net { class SocketConnection; @@ -19,12 +24,22 @@ class FrameAssemblerV2 { public: FrameAssemblerV2(SocketConnection &conn); - ~FrameAssemblerV2() = default; + ~FrameAssemblerV2(); FrameAssemblerV2(const FrameAssemblerV2 &) = delete; FrameAssemblerV2(FrameAssemblerV2 &&) = delete; + void set_shard_id(seastar::shard_id _sid) { + assert(seastar::this_shard_id() == sid); + clear(); + sid = _sid; + } + + seastar::shard_id get_shard_id() const { + return sid; + } + void set_is_rev1(bool is_rev1); void create_session_stream_handlers( @@ -38,7 +53,7 @@ public: */ struct mover_t { - SocketRef socket; + SocketFRef socket; ceph::crypto::onwire::rxtx_t session_stream_handlers; ceph::compression::onwire::rxtx_t session_comp_handlers; }; @@ -66,13 +81,17 @@ public: // the socket exists and not shutdown bool is_socket_valid() const; - void set_socket(SocketRef &&); + seastar::shard_id get_socket_shard_id() const; + + void set_socket(SocketFRef &&); void learn_socket_ephemeral_port_as_connector(uint16_t port); - void shutdown_socket(); + // if may_cross_core == true, gate is 
required for cross-core shutdown + template + void shutdown_socket(crimson::common::Gated *gate); - seastar::future<> replace_shutdown_socket(SocketRef &&); + seastar::future<> replace_shutdown_socket(SocketFRef &&); seastar::future<> close_shutdown_socket(); @@ -80,15 +99,20 @@ public: * socket read and write interfaces */ - seastar::future read_exactly(std::size_t bytes); + template + seastar::future read_exactly(std::size_t bytes); + template seastar::future read(std::size_t bytes); - seastar::future<> write(ceph::bufferlist &&); + template + seastar::future<> write(ceph::bufferlist); + template seastar::future<> flush(); - seastar::future<> write_flush(ceph::bufferlist &&); + template + seastar::future<> write_flush(ceph::bufferlist); /* * frame read and write interfaces @@ -99,46 +123,101 @@ public: ceph::msgr::v2::Tag tag; const ceph::msgr::v2::FrameAssembler *rx_frame_asm; }; + template seastar::future read_main_preamble(); /// may throw negotiation_failure as fault using read_payload_t = ceph::msgr::v2::segment_bls_t; // FIXME: read_payload_t cannot be no-throw move constructible + template seastar::future read_frame_payload(); template ceph::bufferlist get_buffer(F &tx_frame) { -#ifdef UNIT_TESTS_BUILT - intercept_frame(F::tag, true); -#endif + assert(seastar::this_shard_id() == sid); auto bl = tx_frame.get_buffer(tx_frame_asm); log_main_preamble(bl); return bl; } - template + template seastar::future<> write_flush_frame(F &tx_frame) { + assert(seastar::this_shard_id() == sid); auto bl = get_buffer(tx_frame); - return write_flush(std::move(bl)); +#ifdef UNIT_TESTS_BUILT + return intercept_frame(F::tag, true + ).then([this, bl=std::move(bl)]() mutable { + return write_flush(std::move(bl)); + }); +#else + return write_flush(std::move(bl)); +#endif } static FrameAssemblerV2Ref create(SocketConnection &conn); -private: - bool has_socket() const; +#ifdef UNIT_TESTS_BUILT + seastar::future<> intercept_frames( + std::vector tags, + bool is_write) { + auto type = is_write ? bp_type_t::WRITE : bp_type_t::READ; + std::vector bps; + for (auto &tag : tags) { + bps.emplace_back(Breakpoint{tag, type}); + } + return intercept_frames(bps, type); + } - void log_main_preamble(const ceph::bufferlist &bl); + seastar::future<> intercept_frame( + ceph::msgr::v2::Tag tag, + bool is_write) { + auto type = is_write ? bp_type_t::WRITE : bp_type_t::READ; + std::vector bps; + bps.emplace_back(Breakpoint{tag, type}); + return intercept_frames(bps, type); + } + + seastar::future<> intercept_frame( + custom_bp_t bp, + bool is_write) { + auto type = is_write ? bp_type_t::WRITE : bp_type_t::READ; + std::vector bps; + bps.emplace_back(Breakpoint{bp}); + return intercept_frames(bps, type); + } +#endif +private: #ifdef UNIT_TESTS_BUILT - void intercept_frame(ceph::msgr::v2::Tag, bool is_write); + seastar::future<> intercept_frames( + std::vector bps, + bp_type_t type); #endif + bool has_socket() const; + + SocketFRef move_socket(); + + void clear(); + + void log_main_preamble(const ceph::bufferlist &bl); + SocketConnection &conn; - Socket *socket = nullptr; + SocketFRef socket; + + // checking Socket::is_shutdown() synchronously is impossible when sid is + // different from the socket sid. + bool is_socket_shutdown = false; + + // the current working shard, can be messenger or socket shard. + // if is messenger shard, should call interfaces with may_cross_core = true. 
+ seastar::shard_id sid; /* * auth signature + * + * only in the messenger core */ bool record_io = false; @@ -166,6 +245,10 @@ private: &session_stream_handlers, is_rev1, common::local_conf()->ms_crc_data, &session_comp_handlers}; + // in the messenger core during handshake, + // and in the socket core during open, + // must be cleaned before switching cores. + ceph::bufferlist rx_preamble; read_payload_t rx_segments_data; diff --git a/ceph/src/crimson/net/Fwd.h b/ceph/src/crimson/net/Fwd.h index 3eb57ef97..2b1595141 100644 --- a/ceph/src/crimson/net/Fwd.h +++ b/ceph/src/crimson/net/Fwd.h @@ -39,8 +39,6 @@ using ConnectionLRef = seastar::shared_ptr; using ConnectionFRef = seastar::foreign_ptr; using ConnectionRef = ::crimson::local_shared_foreign_ptr; -class SocketConnection; - class Dispatcher; class ChainedDispatchers; constexpr std::size_t NUM_DISPATCHERS = 4u; @@ -49,4 +47,6 @@ using dispatchers_t = boost::container::small_vector; +using MessageFRef = seastar::foreign_ptr; + } // namespace crimson::net diff --git a/ceph/src/crimson/net/Interceptor.h b/ceph/src/crimson/net/Interceptor.h index 41ec31f37..35b74e243 100644 --- a/ceph/src/crimson/net/Interceptor.h +++ b/ceph/src/crimson/net/Interceptor.h @@ -45,16 +45,21 @@ enum class bp_action_t { class socket_blocker { std::optional p_blocked; std::optional p_unblocked; + const seastar::shard_id primary_sid; public: + socket_blocker() : primary_sid{seastar::this_shard_id()} {} + seastar::future<> wait_blocked() { + ceph_assert(seastar::this_shard_id() == primary_sid); ceph_assert(!p_blocked); if (p_unblocked) { return seastar::make_ready_future<>(); } else { p_blocked = seastar::abort_source(); - return seastar::sleep_abortable(std::chrono::seconds(10), - *p_blocked).then([] { + return seastar::sleep_abortable( + std::chrono::seconds(10), *p_blocked + ).then([] { throw std::runtime_error( "Timeout (10s) in socket_blocker::wait_blocked()"); }).handle_exception_type([] (const seastar::sleep_aborted& e) { @@ -64,21 +69,25 @@ class socket_blocker { } seastar::future<> block() { - if (p_blocked) { - p_blocked->request_abort(); - p_blocked = std::nullopt; - } - ceph_assert(!p_unblocked); - p_unblocked = seastar::abort_source(); - return seastar::sleep_abortable(std::chrono::seconds(10), - *p_unblocked).then([] { - ceph_abort("Timeout (10s) in socket_blocker::block()"); - }).handle_exception_type([] (const seastar::sleep_aborted& e) { - // wait done! + return seastar::smp::submit_to(primary_sid, [this] { + if (p_blocked) { + p_blocked->request_abort(); + p_blocked = std::nullopt; + } + ceph_assert(!p_unblocked); + p_unblocked = seastar::abort_source(); + return seastar::sleep_abortable( + std::chrono::seconds(10), *p_unblocked + ).then([] { + ceph_abort("Timeout (10s) in socket_blocker::block()"); + }).handle_exception_type([] (const seastar::sleep_aborted& e) { + // wait done! 
+ }); }); } void unblock() { + ceph_assert(seastar::this_shard_id() == primary_sid); ceph_assert(!p_blocked); ceph_assert(p_unblocked); p_unblocked->request_abort(); @@ -116,11 +125,13 @@ struct Breakpoint { struct Interceptor { socket_blocker blocker; virtual ~Interceptor() {} - virtual void register_conn(SocketConnection& conn) = 0; - virtual void register_conn_ready(SocketConnection& conn) = 0; - virtual void register_conn_closed(SocketConnection& conn) = 0; - virtual void register_conn_replaced(SocketConnection& conn) = 0; - virtual bp_action_t intercept(SocketConnection& conn, Breakpoint bp) = 0; + virtual void register_conn(ConnectionRef) = 0; + virtual void register_conn_ready(ConnectionRef) = 0; + virtual void register_conn_closed(ConnectionRef) = 0; + virtual void register_conn_replaced(ConnectionRef) = 0; + + virtual seastar::future + intercept(Connection&, std::vector bp) = 0; }; } // namespace crimson::net diff --git a/ceph/src/crimson/net/Messenger.cc b/ceph/src/crimson/net/Messenger.cc index aab476f7a..1af198589 100644 --- a/ceph/src/crimson/net/Messenger.cc +++ b/ceph/src/crimson/net/Messenger.cc @@ -9,9 +9,11 @@ namespace crimson::net { MessengerRef Messenger::create(const entity_name_t& name, const std::string& lname, - const uint64_t nonce) + uint64_t nonce, + bool dispatch_only_on_this_shard) { - return seastar::make_shared(name, lname, nonce); + return seastar::make_shared( + name, lname, nonce, dispatch_only_on_this_shard); } } // namespace crimson::net diff --git a/ceph/src/crimson/net/Messenger.h b/ceph/src/crimson/net/Messenger.h index e2fba3257..74df062d8 100644 --- a/ceph/src/crimson/net/Messenger.h +++ b/ceph/src/crimson/net/Messenger.h @@ -108,7 +108,8 @@ public: static MessengerRef create(const entity_name_t& name, const std::string& lname, - const uint64_t nonce); + uint64_t nonce, + bool dispatch_only_on_this_shard); #ifdef UNIT_TESTS_BUILT virtual void set_interceptor(Interceptor *) = 0; diff --git a/ceph/src/crimson/net/ProtocolV2.cc b/ceph/src/crimson/net/ProtocolV2.cc index 95b756637..55b669384 100644 --- a/ceph/src/crimson/net/ProtocolV2.cc +++ b/ceph/src/crimson/net/ProtocolV2.cc @@ -17,14 +17,8 @@ #include "Errors.h" #include "SocketMessenger.h" -#ifdef UNIT_TESTS_BUILT -#include "Interceptor.h" -#endif - using namespace ceph::msgr::v2; using crimson::common::local_conf; -using io_state_t = crimson::net::IOHandler::io_state_t; -using io_stat_printer = crimson::net::IOHandler::io_stat_printer; namespace { @@ -103,26 +97,6 @@ inline uint64_t generate_client_cookie() { namespace crimson::net { -#ifdef UNIT_TESTS_BUILT -// should be consistent to intercept_frame() in FrameAssemblerV2.cc -void intercept(Breakpoint bp, - bp_type_t type, - SocketConnection& conn, - Interceptor *interceptor, - SocketRef& socket) { - if (interceptor) { - auto action = interceptor->intercept(conn, Breakpoint(bp)); - socket->set_trap(type, action, &interceptor->blocker); - } -} - -#define INTERCEPT_CUSTOM(bp, type) \ -intercept({bp}, type, conn, \ - conn.interceptor, conn.socket) -#else -#define INTERCEPT_CUSTOM(bp, type) -#endif - seastar::future<> ProtocolV2::Timer::backoff(double seconds) { logger().warn("{} waiting {} seconds ...", conn, seconds); @@ -146,13 +120,16 @@ ProtocolV2::ProtocolV2(SocketConnection& conn, frame_assembler{FrameAssemblerV2::create(conn)}, auth_meta{seastar::make_lw_shared()}, protocol_timer{conn} -{} +{ + io_states = io_handler.get_states(); +} ProtocolV2::~ProtocolV2() {} void ProtocolV2::start_connect(const entity_addr_t& _peer_addr, const 
entity_name_t& _peer_name) { + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); ceph_assert(state == state_t::NONE); ceph_assert(!gate.is_closed()); conn.peer_addr = _peer_addr; @@ -170,9 +147,10 @@ void ProtocolV2::start_connect(const entity_addr_t& _peer_addr, execute_connecting(); } -void ProtocolV2::start_accept(SocketRef&& new_socket, +void ProtocolV2::start_accept(SocketFRef&& new_socket, const entity_addr_t& _peer_addr) { + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); ceph_assert(state == state_t::NONE); // until we know better conn.target_addr = _peer_addr; @@ -182,12 +160,22 @@ void ProtocolV2::start_accept(SocketRef&& new_socket, logger().info("{} ProtocolV2::start_accept(): target_addr={}", conn, _peer_addr); messenger.accept_conn( seastar::static_pointer_cast(conn.shared_from_this())); + + auto cc_seq = crosscore.prepare_submit(); + gate.dispatch_in_background("set_accepted_sid", conn, [this, cc_seq] { + return io_handler.set_accepted_sid( + cc_seq, + frame_assembler->get_socket_shard_id(), + seastar::make_foreign(conn.shared_from_this())); + }); + execute_accepting(); } -void ProtocolV2::trigger_state(state_t new_state, io_state_t new_io_state, bool reentrant) +void ProtocolV2::trigger_state_phase1(state_t new_state) { - if (!reentrant && new_state == state) { + ceph_assert_always(!gate.is_closed()); + if (new_state == state) { logger().error("{} is not allowed to re-trigger state {}", conn, get_state_name(state)); ceph_abort(); @@ -199,32 +187,84 @@ void ProtocolV2::trigger_state(state_t new_state, io_state_t new_io_state, bool } logger().debug("{} TRIGGER {}, was {}", conn, get_state_name(new_state), get_state_name(state)); - auto pre_state = state; - if (pre_state == state_t::READY) { - assert(!gate.is_closed()); - ceph_assert_always(!exit_io.has_value()); - exit_io = seastar::shared_promise<>(); + + if (state == state_t::READY) { + // from READY + ceph_assert_always(!need_exit_io); + ceph_assert_always(!pr_exit_io.has_value()); + need_exit_io = true; + pr_exit_io = seastar::shared_promise<>(); } + + if (new_state == state_t::STANDBY && !conn.policy.server) { + need_notify_out = true; + } else { + need_notify_out = false; + } + state = new_state; +} + +void ProtocolV2::trigger_state_phase2( + state_t new_state, io_state_t new_io_state) +{ + ceph_assert_always(new_state == state); + ceph_assert_always(!gate.is_closed()); + ceph_assert_always(!pr_switch_io_shard.has_value()); + + FrameAssemblerV2Ref fa; if (new_state == state_t::READY) { - // I'm not responsible to shutdown the socket at READY - is_socket_valid = false; - io_handler.set_io_state(new_io_state, std::move(frame_assembler)); + assert(new_io_state == io_state_t::open); + assert(io_handler.get_shard_id() == + frame_assembler->get_socket_shard_id()); + frame_assembler->set_shard_id(io_handler.get_shard_id()); + fa = std::move(frame_assembler); } else { - io_handler.set_io_state(new_io_state, nullptr); + assert(new_io_state != io_state_t::open); } - /* - * not atomic below - */ + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} IOHandler::set_io_state(): new_state={}, new_io_state={}, " + "fa={}, set_notify_out={}", + conn, cc_seq, get_state_name(new_state), new_io_state, + fa ? 
fmt::format("(sid={})", fa->get_shard_id()) : "N/A", + need_notify_out); + gate.dispatch_in_background( + "set_io_state", conn, + [this, cc_seq, new_io_state, fa=std::move(fa)]() mutable { + return seastar::smp::submit_to( + io_handler.get_shard_id(), + [this, cc_seq, new_io_state, + fa=std::move(fa), set_notify_out=need_notify_out]() mutable { + return io_handler.set_io_state( + cc_seq, new_io_state, std::move(fa), set_notify_out); + }); + }); - if (pre_state == state_t::READY) { - gate.dispatch_in_background("exit_io", conn, [this] { - return io_handler.wait_io_exit_dispatching( - ).then([this](FrameAssemblerV2Ref fa) { - frame_assembler = std::move(fa); - exit_io->set_value(); - exit_io = std::nullopt; + if (need_exit_io) { + // from READY + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} IOHandler::wait_io_exit_dispatching() ...", + conn, cc_seq); + assert(pr_exit_io.has_value()); + assert(new_io_state != io_state_t::open); + need_exit_io = false; + gate.dispatch_in_background("exit_io", conn, [this, cc_seq] { + return seastar::smp::submit_to( + io_handler.get_shard_id(), [this, cc_seq] { + return io_handler.wait_io_exit_dispatching(cc_seq); + }).then([this, cc_seq](auto ret) { + logger().debug("{} finish {} IOHandler::wait_io_exit_dispatching(), {}", + conn, cc_seq, ret.io_states); + frame_assembler = std::move(ret.frame_assembler); + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); + ceph_assert_always( + seastar::this_shard_id() == frame_assembler->get_shard_id()); + ceph_assert_always(!frame_assembler->is_socket_valid()); + assert(!need_exit_io); + io_states = ret.io_states; + pr_exit_io->set_value(); + pr_exit_io = std::nullopt; }); }); } @@ -281,7 +321,7 @@ void ProtocolV2::fault( if (likely(has_socket)) { if (likely(is_socket_valid)) { ceph_assert_always(state != state_t::READY); - frame_assembler->shutdown_socket(); + frame_assembler->shutdown_socket(&gate); is_socket_valid = false; } else { ceph_assert_always(state != state_t::ESTABLISHING); @@ -292,20 +332,20 @@ void ProtocolV2::fault( } if (conn.policy.server || - (conn.policy.standby && !io_handler.is_out_queued_or_sent())) { + (conn.policy.standby && !io_states.is_out_queued_or_sent())) { if (conn.policy.server) { logger().info("{} protocol {} {} fault as server, going to STANDBY {} -- {}", conn, get_state_name(state), where, - io_stat_printer{io_handler}, + io_states, e_what); } else { logger().info("{} protocol {} {} fault with nothing to send, going to STANDBY {} -- {}", conn, get_state_name(state), where, - io_stat_printer{io_handler}, + io_states, e_what); } execute_standby(); @@ -315,7 +355,7 @@ void ProtocolV2::fault( conn, get_state_name(state), where, - io_stat_printer{io_handler}, + io_states, e_what); execute_wait(false); } else { @@ -325,7 +365,7 @@ void ProtocolV2::fault( conn, get_state_name(state), where, - io_stat_printer{io_handler}, + io_states, e_what); execute_connecting(); } @@ -339,7 +379,19 @@ void ProtocolV2::reset_session(bool full) client_cookie = generate_client_cookie(); peer_global_seq = 0; } - io_handler.reset_session(full); + + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} IOHandler::reset_session({})", + conn, cc_seq, full); + io_states.reset_session(full); + gate.dispatch_in_background( + "reset_session", conn, [this, cc_seq, full] { + return seastar::smp::submit_to( + io_handler.get_shard_id(), [this, cc_seq, full] { + return io_handler.reset_session(cc_seq, full); + }); + }); + // user can make changes } seastar::future> @@ 
-361,98 +413,121 @@ ProtocolV2::banner_exchange(bool is_connect) CRIMSON_MSGR2_SUPPORTED_FEATURES, CEPH_MSGR2_REQUIRED_FEATURES, CEPH_BANNER_V2_PREFIX); - INTERCEPT_CUSTOM(custom_bp_t::BANNER_WRITE, bp_type_t::WRITE); - return frame_assembler->write_flush(std::move(bl)).then([this] { - // 2. read peer banner - unsigned banner_len = strlen(CEPH_BANNER_V2_PREFIX) + sizeof(ceph_le16); - INTERCEPT_CUSTOM(custom_bp_t::BANNER_READ, bp_type_t::READ); - return frame_assembler->read_exactly(banner_len); // or read exactly? - }).then([this] (auto bl) { - // 3. process peer banner and read banner_payload - unsigned banner_prefix_len = strlen(CEPH_BANNER_V2_PREFIX); - logger().debug("{} RECV({}) banner: \"{}\"", - conn, bl.size(), - std::string((const char*)bl.get(), banner_prefix_len)); - - if (memcmp(bl.get(), CEPH_BANNER_V2_PREFIX, banner_prefix_len) != 0) { - if (memcmp(bl.get(), CEPH_BANNER, strlen(CEPH_BANNER)) == 0) { - logger().warn("{} peer is using V1 protocol", conn); - } else { - logger().warn("{} peer sent bad banner", conn); - } - abort_in_fault(); +#ifdef UNIT_TESTS_BUILT + return frame_assembler->intercept_frame(custom_bp_t::BANNER_WRITE, true + ).then([this, bl=std::move(bl)]() mutable { + return frame_assembler->write_flush(std::move(bl)); + } +#else + return frame_assembler->write_flush(std::move(bl) +#endif + ).then([this] { + // 2. read peer banner + unsigned banner_len = strlen(CEPH_BANNER_V2_PREFIX) + sizeof(ceph_le16); +#ifdef UNIT_TESTS_BUILT + return frame_assembler->intercept_frame(custom_bp_t::BANNER_READ, false + ).then([this, banner_len] { + return frame_assembler->read_exactly(banner_len); + }); +#else + return frame_assembler->read_exactly(banner_len); +#endif + }).then([this](auto bptr) { + // 3. process peer banner and read banner_payload + unsigned banner_prefix_len = strlen(CEPH_BANNER_V2_PREFIX); + logger().debug("{} RECV({}) banner: \"{}\"", + conn, bptr.length(), + std::string(bptr.c_str(), banner_prefix_len)); + + if (memcmp(bptr.c_str(), CEPH_BANNER_V2_PREFIX, banner_prefix_len) != 0) { + if (memcmp(bptr.c_str(), CEPH_BANNER, strlen(CEPH_BANNER)) == 0) { + logger().warn("{} peer is using V1 protocol", conn); + } else { + logger().warn("{} peer sent bad banner", conn); } - bl.trim_front(banner_prefix_len); + abort_in_fault(); + } - uint16_t payload_len; - bufferlist buf; - buf.append(buffer::create(std::move(bl))); - auto ti = buf.cbegin(); - try { - decode(payload_len, ti); - } catch (const buffer::error &e) { - logger().warn("{} decode banner payload len failed", conn); - abort_in_fault(); - } - logger().debug("{} GOT banner: payload_len={}", conn, payload_len); - INTERCEPT_CUSTOM(custom_bp_t::BANNER_PAYLOAD_READ, bp_type_t::READ); + bptr.set_offset(bptr.offset() + banner_prefix_len); + bptr.set_length(bptr.length() - banner_prefix_len); + assert(bptr.length() == sizeof(ceph_le16)); + + uint16_t payload_len; + bufferlist buf; + buf.append(std::move(bptr)); + auto ti = buf.cbegin(); + try { + decode(payload_len, ti); + } catch (const buffer::error &e) { + logger().warn("{} decode banner payload len failed", conn); + abort_in_fault(); + } + logger().debug("{} GOT banner: payload_len={}", conn, payload_len); +#ifdef UNIT_TESTS_BUILT + return frame_assembler->intercept_frame( + custom_bp_t::BANNER_PAYLOAD_READ, false + ).then([this, payload_len] { return frame_assembler->read(payload_len); - }).then([this, is_connect] (bufferlist bl) { - // 4. 
process peer banner_payload and send HelloFrame - auto p = bl.cbegin(); - uint64_t _peer_supported_features; - uint64_t _peer_required_features; - try { - decode(_peer_supported_features, p); - decode(_peer_required_features, p); - } catch (const buffer::error &e) { - logger().warn("{} decode banner payload failed", conn); - abort_in_fault(); - } - logger().debug("{} RECV({}) banner features: supported={} required={}", - conn, bl.length(), - _peer_supported_features, _peer_required_features); - - // Check feature bit compatibility - uint64_t supported_features = CRIMSON_MSGR2_SUPPORTED_FEATURES; - uint64_t required_features = CEPH_MSGR2_REQUIRED_FEATURES; - if ((required_features & _peer_supported_features) != required_features) { - logger().error("{} peer does not support all required features" - " required={} peer_supported={}", - conn, required_features, _peer_supported_features); - ABORT_IN_CLOSE(is_connect); - } - if ((supported_features & _peer_required_features) != _peer_required_features) { - logger().error("{} we do not support all peer required features" - " peer_required={} supported={}", - conn, _peer_required_features, supported_features); - ABORT_IN_CLOSE(is_connect); - } - peer_supported_features = _peer_supported_features; - bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1); - frame_assembler->set_is_rev1(is_rev1); - - auto hello = HelloFrame::Encode(messenger.get_mytype(), - conn.target_addr); - logger().debug("{} WRITE HelloFrame: my_type={}, peer_addr={}", - conn, ceph_entity_type_name(messenger.get_mytype()), - conn.target_addr); - return frame_assembler->write_flush_frame(hello); - }).then([this] { - //5. read peer HelloFrame - return frame_assembler->read_main_preamble(); - }).then([this](auto ret) { - expect_tag(Tag::HELLO, ret.tag, conn, "read_hello_frame"); - return frame_assembler->read_frame_payload(); - }).then([this](auto payload) { - // 6. process peer HelloFrame - auto hello = HelloFrame::Decode(payload->back()); - logger().debug("{} GOT HelloFrame: my_type={} peer_addr={}", - conn, ceph_entity_type_name(hello.entity_type()), - hello.peer_addr()); - return seastar::make_ready_future>( - std::make_tuple(hello.entity_type(), hello.peer_addr())); }); +#else + return frame_assembler->read(payload_len); +#endif + }).then([this, is_connect] (bufferlist bl) { + // 4. 
process peer banner_payload and send HelloFrame + auto p = bl.cbegin(); + uint64_t _peer_supported_features; + uint64_t _peer_required_features; + try { + decode(_peer_supported_features, p); + decode(_peer_required_features, p); + } catch (const buffer::error &e) { + logger().warn("{} decode banner payload failed", conn); + abort_in_fault(); + } + logger().debug("{} RECV({}) banner features: supported={} required={}", + conn, bl.length(), + _peer_supported_features, _peer_required_features); + + // Check feature bit compatibility + uint64_t supported_features = CRIMSON_MSGR2_SUPPORTED_FEATURES; + uint64_t required_features = CEPH_MSGR2_REQUIRED_FEATURES; + if ((required_features & _peer_supported_features) != required_features) { + logger().error("{} peer does not support all required features" + " required={} peer_supported={}", + conn, required_features, _peer_supported_features); + ABORT_IN_CLOSE(is_connect); + } + if ((supported_features & _peer_required_features) != _peer_required_features) { + logger().error("{} we do not support all peer required features" + " peer_required={} supported={}", + conn, _peer_required_features, supported_features); + ABORT_IN_CLOSE(is_connect); + } + peer_supported_features = _peer_supported_features; + bool is_rev1 = HAVE_MSGR2_FEATURE(peer_supported_features, REVISION_1); + frame_assembler->set_is_rev1(is_rev1); + + auto hello = HelloFrame::Encode(messenger.get_mytype(), + conn.target_addr); + logger().debug("{} WRITE HelloFrame: my_type={}, peer_addr={}", + conn, ceph_entity_type_name(messenger.get_mytype()), + conn.target_addr); + return frame_assembler->write_flush_frame(hello); + }).then([this] { + //5. read peer HelloFrame + return frame_assembler->read_main_preamble(); + }).then([this](auto ret) { + expect_tag(Tag::HELLO, ret.tag, conn, "read_hello_frame"); + return frame_assembler->read_frame_payload(); + }).then([this](auto payload) { + // 6. 
process peer HelloFrame + auto hello = HelloFrame::Decode(payload->back()); + logger().debug("{} GOT HelloFrame: my_type={} peer_addr={}", + conn, ceph_entity_type_name(hello.entity_type()), + hello.peer_addr()); + return seastar::make_ready_future>( + std::make_tuple(hello.entity_type(), hello.peer_addr())); + }); } // CONNECTING state @@ -616,8 +691,25 @@ ProtocolV2::client_connect() case Tag::SERVER_IDENT: return frame_assembler->read_frame_payload( ).then([this](auto payload) { + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} triggered {} at receiving SERVER_IDENT", + conn, get_state_name(state)); + abort_protocol(); + } + // handle_server_ident() logic - io_handler.requeue_out_sent(); + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} IOHandler::requeue_out_sent()", + conn, cc_seq); + io_states.requeue_out_sent(); + gate.dispatch_in_background( + "requeue_out_sent", conn, [this, cc_seq] { + return seastar::smp::submit_to( + io_handler.get_shard_id(), [this, cc_seq] { + return io_handler.requeue_out_sent(cc_seq); + }); + }); + auto server_ident = ServerIdentFrame::Decode(payload->back()); logger().debug("{} GOT ServerIdentFrame:" " addrs={}, gid={}, gs={}," @@ -693,12 +785,12 @@ ProtocolV2::client_reconnect() server_cookie, global_seq, connect_seq, - io_handler.get_in_seq()); + io_states.in_seq); logger().debug("{} WRITE ReconnectFrame: addrs={}, client_cookie={}," " server_cookie={}, gs={}, cs={}, in_seq={}", conn, messenger.get_myaddrs(), client_cookie, server_cookie, - global_seq, connect_seq, io_handler.get_in_seq()); + global_seq, connect_seq, io_states.in_seq); return frame_assembler->write_flush_frame(reconnect).then([this] { return frame_assembler->read_main_preamble(); }).then([this](auto ret) { @@ -736,7 +828,10 @@ ProtocolV2::client_reconnect() // handle_session_reset() logic auto reset = ResetFrame::Decode(payload->back()); logger().warn("{} GOT ResetFrame: full={}", conn, reset.full()); + reset_session(reset.full()); + // user can make changes + return client_connect(); }); case Tag::WAIT: @@ -744,11 +839,29 @@ ProtocolV2::client_reconnect() case Tag::SESSION_RECONNECT_OK: return frame_assembler->read_frame_payload( ).then([this](auto payload) { + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} triggered {} at receiving RECONNECT_OK", + conn, get_state_name(state)); + abort_protocol(); + } + // handle_reconnect_ok() logic auto reconnect_ok = ReconnectOkFrame::Decode(payload->back()); - logger().debug("{} GOT ReconnectOkFrame: msg_seq={}", - conn, reconnect_ok.msg_seq()); - io_handler.requeue_out_sent_up_to(reconnect_ok.msg_seq()); + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} GOT ReconnectOkFrame: msg_seq={}, " + "send {} IOHandler::requeue_out_sent_up_to()", + conn, reconnect_ok.msg_seq(), cc_seq); + + io_states.requeue_out_sent_up_to(); + auto msg_seq = reconnect_ok.msg_seq(); + gate.dispatch_in_background( + "requeue_out_reconnecting", conn, [this, cc_seq, msg_seq] { + return seastar::smp::submit_to( + io_handler.get_shard_id(), [this, cc_seq, msg_seq] { + return io_handler.requeue_out_sent_up_to(cc_seq, msg_seq); + }); + }); + return seastar::make_ready_future(next_step_t::ready); }); default: { @@ -762,148 +875,179 @@ ProtocolV2::client_reconnect() void ProtocolV2::execute_connecting() { ceph_assert_always(!is_socket_valid); - trigger_state(state_t::CONNECTING, io_state_t::delay, false); + trigger_state(state_t::CONNECTING, io_state_t::delay); gated_execute("execute_connecting", conn, 
[this] { - global_seq = messenger.get_global_seq(); - assert(client_cookie != 0); - if (!conn.policy.lossy && server_cookie != 0) { - ++connect_seq; - logger().debug("{} UPDATE: gs={}, cs={} for reconnect", - conn, global_seq, connect_seq); - } else { // conn.policy.lossy || server_cookie == 0 - assert(connect_seq == 0); - assert(server_cookie == 0); - logger().debug("{} UPDATE: gs={} for connect", conn, global_seq); - } - return wait_exit_io().then([this] { + global_seq = messenger.get_global_seq(); + assert(client_cookie != 0); + if (!conn.policy.lossy && server_cookie != 0) { + ++connect_seq; + logger().debug("{} UPDATE: gs={}, cs={} for reconnect", + conn, global_seq, connect_seq); + } else { // conn.policy.lossy || server_cookie == 0 + assert(connect_seq == 0); + assert(server_cookie == 0); + logger().debug("{} UPDATE: gs={} for connect", conn, global_seq); + } + return wait_exit_io().then([this] { #ifdef UNIT_TESTS_BUILT - // process custom_bp_t::SOCKET_CONNECTING - // supports CONTINUE/FAULT/BLOCK - if (conn.interceptor) { - auto action = conn.interceptor->intercept( - conn, {custom_bp_t::SOCKET_CONNECTING}); - switch (action) { - case bp_action_t::CONTINUE: - return seastar::now(); - case bp_action_t::FAULT: - logger().info("[Test] got FAULT"); - abort_in_fault(); - case bp_action_t::BLOCK: - logger().info("[Test] got BLOCK"); - return conn.interceptor->blocker.block(); - default: - ceph_abort("unexpected action from trap"); - } - } else { - return seastar::now(); - } - }).then([this] { -#endif - ceph_assert_always(frame_assembler); - if (unlikely(state != state_t::CONNECTING)) { - logger().debug("{} triggered {} before Socket::connect()", - conn, get_state_name(state)); - abort_protocol(); - } - return Socket::connect(conn.peer_addr); - }).then([this](SocketRef new_socket) { - logger().debug("{} socket connected", conn); - if (unlikely(state != state_t::CONNECTING)) { - logger().debug("{} triggered {} during Socket::connect()", - conn, get_state_name(state)); - return new_socket->close().then([sock=std::move(new_socket)] { - abort_protocol(); - }); - } - if (!has_socket) { - frame_assembler->set_socket(std::move(new_socket)); - has_socket = true; - } else { - gate.dispatch_in_background( - "replace_socket_connecting", - conn, - [this, new_socket=std::move(new_socket)]() mutable { - return frame_assembler->replace_shutdown_socket(std::move(new_socket)); - } - ); - } - is_socket_valid = true; + // process custom_bp_t::SOCKET_CONNECTING + // supports CONTINUE/FAULT/BLOCK + if (!conn.interceptor) { + return seastar::now(); + } + return conn.interceptor->intercept( + conn, {Breakpoint{custom_bp_t::SOCKET_CONNECTING}} + ).then([this](bp_action_t action) { + switch (action) { + case bp_action_t::CONTINUE: return seastar::now(); - }).then([this] { - auth_meta = seastar::make_lw_shared(); - frame_assembler->reset_handlers(); - frame_assembler->start_recording(); - return banner_exchange(true); - }).then([this] (auto&& ret) { - auto [_peer_type, _my_addr_from_peer] = std::move(ret); - if (conn.get_peer_type() != _peer_type) { - logger().warn("{} connection peer type does not match what peer advertises {} != {}", - conn, ceph_entity_type_name(conn.get_peer_type()), - ceph_entity_type_name(_peer_type)); - ABORT_IN_CLOSE(true); - } - if (unlikely(state != state_t::CONNECTING)) { - logger().debug("{} triggered {} during banner_exchange(), abort", - conn, get_state_name(state)); - abort_protocol(); - } - frame_assembler->learn_socket_ephemeral_port_as_connector( - _my_addr_from_peer.get_port()); 
- if (unlikely(_my_addr_from_peer.is_legacy())) { - logger().warn("{} peer sent a legacy address for me: {}", - conn, _my_addr_from_peer); - throw std::system_error( - make_error_code(crimson::net::error::bad_peer_address)); - } - _my_addr_from_peer.set_type(entity_addr_t::TYPE_MSGR2); - messenger.learned_addr(_my_addr_from_peer, conn); - return client_auth(); - }).then([this] { - if (server_cookie == 0) { - ceph_assert(connect_seq == 0); - return client_connect(); - } else { - ceph_assert(connect_seq > 0); - return client_reconnect(); + case bp_action_t::FAULT: + logger().info("[Test] got FAULT"); + abort_in_fault(); + case bp_action_t::BLOCK: + logger().info("[Test] got BLOCK"); + return conn.interceptor->blocker.block(); + default: + ceph_abort("unexpected action from trap"); + return seastar::now(); + } + });; + }).then([this] { +#endif + ceph_assert_always(frame_assembler); + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} triggered {} before Socket::connect()", + conn, get_state_name(state)); + abort_protocol(); + } + return Socket::connect(conn.peer_addr); + }).then([this](SocketRef _new_socket) { + logger().debug("{} socket connected", conn); + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} triggered {} during Socket::connect()", + conn, get_state_name(state)); + return _new_socket->close().then([sock=std::move(_new_socket)] { + abort_protocol(); + }); + } + SocketFRef new_socket = seastar::make_foreign(std::move(_new_socket)); + if (!has_socket) { + frame_assembler->set_socket(std::move(new_socket)); + has_socket = true; + } else { + gate.dispatch_in_background( + "replace_socket_connecting", + conn, + [this, new_socket=std::move(new_socket)]() mutable { + return frame_assembler->replace_shutdown_socket(std::move(new_socket)); } - }).then([this] (next_step_t next) { + ); + } + is_socket_valid = true; + return seastar::now(); + }).then([this] { + auth_meta = seastar::make_lw_shared(); + frame_assembler->reset_handlers(); + frame_assembler->start_recording(); + return banner_exchange(true); + }).then([this] (auto&& ret) { + auto [_peer_type, _my_addr_from_peer] = std::move(ret); + if (conn.get_peer_type() != _peer_type) { + logger().warn("{} connection peer type does not match what peer advertises {} != {}", + conn, ceph_entity_type_name(conn.get_peer_type()), + ceph_entity_type_name(_peer_type)); + ABORT_IN_CLOSE(true); + } + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} triggered {} during banner_exchange(), abort", + conn, get_state_name(state)); + abort_protocol(); + } + frame_assembler->learn_socket_ephemeral_port_as_connector( + _my_addr_from_peer.get_port()); + if (unlikely(_my_addr_from_peer.is_legacy())) { + logger().warn("{} peer sent a legacy address for me: {}", + conn, _my_addr_from_peer); + throw std::system_error( + make_error_code(crimson::net::error::bad_peer_address)); + } + _my_addr_from_peer.set_type(entity_addr_t::TYPE_MSGR2); + messenger.learned_addr(_my_addr_from_peer, conn); + return client_auth(); + }).then([this] { + if (server_cookie == 0) { + ceph_assert(connect_seq == 0); + return client_connect(); + } else { + ceph_assert(connect_seq > 0); + return client_reconnect(); + } + }).then([this] (next_step_t next) { + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} triggered {} at the end of execute_connecting()", + conn, get_state_name(state)); + abort_protocol(); + } + switch (next) { + case next_step_t::ready: { + if (unlikely(state != state_t::CONNECTING)) { + logger().debug("{} 
triggered {} before dispatch_connect(), abort", + conn, get_state_name(state)); + abort_protocol(); + } + + auto cc_seq = crosscore.prepare_submit(); + // there are 2 hops with dispatch_connect() + crosscore.prepare_submit(); + logger().info("{} connected: gs={}, pgs={}, cs={}, " + "client_cookie={}, server_cookie={}, {}, new_sid={}, " + "send {} IOHandler::dispatch_connect()", + conn, global_seq, peer_global_seq, connect_seq, + client_cookie, server_cookie, io_states, + frame_assembler->get_socket_shard_id(), cc_seq); + + // set io_handler to a new shard + auto new_io_shard = frame_assembler->get_socket_shard_id(); + ConnectionFRef conn_fref = seastar::make_foreign( + conn.shared_from_this()); + ceph_assert_always(!pr_switch_io_shard.has_value()); + pr_switch_io_shard = seastar::shared_promise<>(); + return seastar::smp::submit_to( + io_handler.get_shard_id(), + [this, cc_seq, new_io_shard, + conn_fref=std::move(conn_fref)]() mutable { + return io_handler.dispatch_connect( + cc_seq, new_io_shard, std::move(conn_fref)); + }).then([this, new_io_shard] { + ceph_assert_always(io_handler.get_shard_id() == new_io_shard); + pr_switch_io_shard->set_value(); + pr_switch_io_shard = std::nullopt; + // user can make changes + if (unlikely(state != state_t::CONNECTING)) { - logger().debug("{} triggered {} at the end of execute_connecting()", + logger().debug("{} triggered {} after dispatch_connect(), abort", conn, get_state_name(state)); abort_protocol(); } - switch (next) { - case next_step_t::ready: { - logger().info("{} connected: gs={}, pgs={}, cs={}, " - "client_cookie={}, server_cookie={}, {}", - conn, global_seq, peer_global_seq, connect_seq, - client_cookie, server_cookie, - io_stat_printer{io_handler}); - io_handler.dispatch_connect(); - if (unlikely(state != state_t::CONNECTING)) { - logger().debug("{} triggered {} after ms_handle_connect(), abort", - conn, get_state_name(state)); - abort_protocol(); - } - execute_ready(); - break; - } - case next_step_t::wait: { - logger().info("{} execute_connecting(): going to WAIT(max-backoff)", conn); - ceph_assert_always(is_socket_valid); - frame_assembler->shutdown_socket(); - is_socket_valid = false; - execute_wait(true); - break; - } - default: { - ceph_abort("impossible next step"); - } - } - }).handle_exception([this](std::exception_ptr eptr) { - fault(state_t::CONNECTING, "execute_connecting", eptr); + execute_ready(); }); + } + case next_step_t::wait: { + logger().info("{} execute_connecting(): going to WAIT(max-backoff)", conn); + ceph_assert_always(is_socket_valid); + frame_assembler->shutdown_socket(&gate); + is_socket_valid = false; + execute_wait(true); + return seastar::now(); + } + default: { + ceph_abort("impossible next step"); + } + } + }).handle_exception([this](std::exception_ptr eptr) { + fault(state_t::CONNECTING, "execute_connecting", eptr); }); + }); } // ACCEPTING state @@ -1061,7 +1205,8 @@ ProtocolV2::reuse_connection( has_socket = false; #ifdef UNIT_TESTS_BUILT if (conn.interceptor) { - conn.interceptor->register_conn_replaced(conn); + conn.interceptor->register_conn_replaced( + conn.get_local_shared_foreign_from_this()); } #endif // close this connection because all the necessary information is delivered @@ -1476,91 +1621,89 @@ ProtocolV2::server_reconnect() void ProtocolV2::execute_accepting() { assert(is_socket_valid); - trigger_state(state_t::ACCEPTING, io_state_t::none, false); + trigger_state(state_t::ACCEPTING, io_state_t::none); gate.dispatch_in_background("execute_accepting", conn, [this] { - return 
seastar::futurize_invoke([this] { + return seastar::futurize_invoke([this] { #ifdef UNIT_TESTS_BUILT - if (conn.interceptor) { - auto action = conn.interceptor->intercept( - conn, {custom_bp_t::SOCKET_ACCEPTED}); - switch (action) { - case bp_action_t::CONTINUE: - break; - case bp_action_t::FAULT: - logger().info("[Test] got FAULT"); - abort_in_fault(); - default: - ceph_abort("unexpected action from trap"); - } - } -#endif - auth_meta = seastar::make_lw_shared(); - frame_assembler->reset_handlers(); - frame_assembler->start_recording(); - return banner_exchange(false); - }).then([this] (auto&& ret) { - auto [_peer_type, _my_addr_from_peer] = std::move(ret); - ceph_assert(conn.get_peer_type() == 0); - conn.set_peer_type(_peer_type); - - conn.policy = messenger.get_policy(_peer_type); - logger().info("{} UPDATE: peer_type={}," - " policy(lossy={} server={} standby={} resetcheck={})", - conn, ceph_entity_type_name(_peer_type), - conn.policy.lossy, conn.policy.server, - conn.policy.standby, conn.policy.resetcheck); - if (!messenger.get_myaddr().is_blank_ip() && - (messenger.get_myaddr().get_port() != _my_addr_from_peer.get_port() || - messenger.get_myaddr().get_nonce() != _my_addr_from_peer.get_nonce())) { - logger().warn("{} my_addr_from_peer {} port/nonce doesn't match myaddr {}", - conn, _my_addr_from_peer, messenger.get_myaddr()); - throw std::system_error( - make_error_code(crimson::net::error::bad_peer_address)); - } - messenger.learned_addr(_my_addr_from_peer, conn); - return server_auth(); - }).then([this] { - return frame_assembler->read_main_preamble(); - }).then([this](auto ret) { - switch (ret.tag) { - case Tag::CLIENT_IDENT: - return server_connect(); - case Tag::SESSION_RECONNECT: - return server_reconnect(); - default: { - unexpected_tag(ret.tag, conn, "post_server_auth"); - return seastar::make_ready_future(next_step_t::none); - } - } - }).then([this] (next_step_t next) { - switch (next) { - case next_step_t::ready: - assert(state != state_t::ACCEPTING); - break; - case next_step_t::wait: - if (unlikely(state != state_t::ACCEPTING)) { - logger().debug("{} triggered {} at the end of execute_accepting()", - conn, get_state_name(state)); - abort_protocol(); - } - logger().info("{} execute_accepting(): going to SERVER_WAIT", conn); - execute_server_wait(); - break; - default: - ceph_abort("impossible next step"); - } - }).handle_exception([this](std::exception_ptr eptr) { - const char *e_what; - try { - std::rethrow_exception(eptr); - } catch (std::exception &e) { - e_what = e.what(); - } - logger().info("{} execute_accepting(): fault at {}, going to CLOSING -- {}", - conn, get_state_name(state), e_what); - do_close(false); + if (conn.interceptor) { + // only notify socket accepted + gate.dispatch_in_background( + "test_intercept_socket_accepted", conn, [this] { + return conn.interceptor->intercept( + conn, {Breakpoint{custom_bp_t::SOCKET_ACCEPTED}} + ).then([](bp_action_t action) { + ceph_assert(action == bp_action_t::CONTINUE); + }); }); + } +#endif + auth_meta = seastar::make_lw_shared(); + frame_assembler->reset_handlers(); + frame_assembler->start_recording(); + return banner_exchange(false); + }).then([this] (auto&& ret) { + auto [_peer_type, _my_addr_from_peer] = std::move(ret); + ceph_assert(conn.get_peer_type() == 0); + conn.set_peer_type(_peer_type); + + conn.policy = messenger.get_policy(_peer_type); + logger().info("{} UPDATE: peer_type={}," + " policy(lossy={} server={} standby={} resetcheck={})", + conn, ceph_entity_type_name(_peer_type), + conn.policy.lossy, 
conn.policy.server, + conn.policy.standby, conn.policy.resetcheck); + if (!messenger.get_myaddr().is_blank_ip() && + (messenger.get_myaddr().get_port() != _my_addr_from_peer.get_port() || + messenger.get_myaddr().get_nonce() != _my_addr_from_peer.get_nonce())) { + logger().warn("{} my_addr_from_peer {} port/nonce doesn't match myaddr {}", + conn, _my_addr_from_peer, messenger.get_myaddr()); + throw std::system_error( + make_error_code(crimson::net::error::bad_peer_address)); + } + messenger.learned_addr(_my_addr_from_peer, conn); + return server_auth(); + }).then([this] { + return frame_assembler->read_main_preamble(); + }).then([this](auto ret) { + switch (ret.tag) { + case Tag::CLIENT_IDENT: + return server_connect(); + case Tag::SESSION_RECONNECT: + return server_reconnect(); + default: { + unexpected_tag(ret.tag, conn, "post_server_auth"); + return seastar::make_ready_future(next_step_t::none); + } + } + }).then([this] (next_step_t next) { + switch (next) { + case next_step_t::ready: + assert(state != state_t::ACCEPTING); + break; + case next_step_t::wait: + if (unlikely(state != state_t::ACCEPTING)) { + logger().debug("{} triggered {} at the end of execute_accepting()", + conn, get_state_name(state)); + abort_protocol(); + } + logger().info("{} execute_accepting(): going to SERVER_WAIT", conn); + execute_server_wait(); + break; + default: + ceph_abort("impossible next step"); + } + }).handle_exception([this](std::exception_ptr eptr) { + const char *e_what; + try { + std::rethrow_exception(eptr); + } catch (std::exception &e) { + e_what = e.what(); + } + logger().info("{} execute_accepting(): fault at {}, going to CLOSING -- {}", + conn, get_state_name(state), e_what); + do_close(false); }); + }); } // CONNECTING or ACCEPTING state @@ -1609,10 +1752,22 @@ void ProtocolV2::execute_establishing(SocketConnectionRef existing_conn) { }; ceph_assert_always(is_socket_valid); - trigger_state(state_t::ESTABLISHING, io_state_t::delay, false); + trigger_state(state_t::ESTABLISHING, io_state_t::delay); + bool is_replace; if (existing_conn) { - static_cast(existing_conn->protocol.get())->do_close( - true /* is_dispatch_reset */, std::move(accept_me)); + logger().info("{} start establishing: gs={}, pgs={}, cs={}, " + "client_cookie={}, server_cookie={}, {}, new_sid={}, " + "close existing {}", + conn, global_seq, peer_global_seq, connect_seq, + client_cookie, server_cookie, + io_states, frame_assembler->get_socket_shard_id(), + *existing_conn); + is_replace = true; + ProtocolV2 *existing_proto = dynamic_cast( + existing_conn->protocol.get()); + existing_proto->do_close( + true, // is_dispatch_reset + std::move(accept_me)); if (unlikely(state != state_t::ESTABLISHING)) { logger().warn("{} triggered {} during execute_establishing(), " "the accept event will not be delivered!", @@ -1620,18 +1775,48 @@ void ProtocolV2::execute_establishing(SocketConnectionRef existing_conn) { abort_protocol(); } } else { + logger().info("{} start establishing: gs={}, pgs={}, cs={}, " + "client_cookie={}, server_cookie={}, {}, new_sid={}, " + "no existing", + conn, global_seq, peer_global_seq, connect_seq, + client_cookie, server_cookie, io_states, + frame_assembler->get_socket_shard_id()); + is_replace = false; accept_me(); } - io_handler.dispatch_accept(); - if (unlikely(state != state_t::ESTABLISHING)) { - logger().debug("{} triggered {} after ms_handle_accept() during execute_establishing()", - conn, get_state_name(state)); - abort_protocol(); - } + gated_execute("execute_establishing", conn, [this, is_replace] { + 
ceph_assert_always(state == state_t::ESTABLISHING); + + // set io_handler to a new shard + auto cc_seq = crosscore.prepare_submit(); + // there are 2 hops with dispatch_accept() + crosscore.prepare_submit(); + auto new_io_shard = frame_assembler->get_socket_shard_id(); + logger().debug("{} send {} IOHandler::dispatch_accept({})", + conn, cc_seq, new_io_shard); + ConnectionFRef conn_fref = seastar::make_foreign( + conn.shared_from_this()); + ceph_assert_always(!pr_switch_io_shard.has_value()); + pr_switch_io_shard = seastar::shared_promise<>(); + return seastar::smp::submit_to( + io_handler.get_shard_id(), + [this, cc_seq, new_io_shard, is_replace, + conn_fref=std::move(conn_fref)]() mutable { + return io_handler.dispatch_accept( + cc_seq, new_io_shard, std::move(conn_fref), is_replace); + }).then([this, new_io_shard] { + ceph_assert_always(io_handler.get_shard_id() == new_io_shard); + pr_switch_io_shard->set_value(); + pr_switch_io_shard = std::nullopt; + // user can make changes + + if (unlikely(state != state_t::ESTABLISHING)) { + logger().debug("{} triggered {} after dispatch_accept() during execute_establishing()", + conn, get_state_name(state)); + abort_protocol(); + } - gated_execute("execute_establishing", conn, [this] { - return seastar::futurize_invoke([this] { return send_server_ident(); }).then([this] { if (unlikely(state != state_t::ESTABLISHING)) { @@ -1639,11 +1824,7 @@ void ProtocolV2::execute_establishing(SocketConnectionRef existing_conn) { conn, get_state_name(state)); abort_protocol(); } - logger().info("{} established: gs={}, pgs={}, cs={}, " - "client_cookie={}, server_cookie={}, {}", - conn, global_seq, peer_global_seq, connect_seq, - client_cookie, server_cookie, - io_stat_printer{io_handler}); + logger().info("{} established, going to ready", conn); execute_ready(); }).handle_exception([this](std::exception_ptr eptr) { fault(state_t::ESTABLISHING, "execute_establishing", eptr); @@ -1656,15 +1837,26 @@ void ProtocolV2::execute_establishing(SocketConnectionRef existing_conn) { seastar::future<> ProtocolV2::send_server_ident() { + ceph_assert_always(state == state_t::ESTABLISHING || + state == state_t::REPLACING); // send_server_ident() logic // refered to async-conn v2: not assign gs to global_seq global_seq = messenger.get_global_seq(); - logger().debug("{} UPDATE: gs={} for server ident", conn, global_seq); + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} UPDATE: gs={} for server ident, " + "send {} IOHandler::reset_peer_state()", + conn, global_seq, cc_seq); // this is required for the case when this connection is being replaced - io_handler.requeue_out_sent_up_to(0); - io_handler.reset_session(false); + io_states.reset_peer_state(); + gate.dispatch_in_background( + "reset_peer_state", conn, [this, cc_seq] { + return seastar::smp::submit_to( + io_handler.get_shard_id(), [this, cc_seq] { + return io_handler.reset_peer_state(cc_seq); + }); + }); if (!conn.policy.lossy) { server_cookie = ceph::util::generate_random_number(1, -1ll); @@ -1709,13 +1901,21 @@ void ProtocolV2::trigger_replacing(bool reconnect, uint64_t new_connect_seq, uint64_t new_msg_seq) { + ceph_assert_always(state >= state_t::ESTABLISHING); + ceph_assert_always(state <= state_t::WAIT); ceph_assert_always(has_socket || state == state_t::CONNECTING); - ceph_assert_always(!mover.socket->is_shutdown()); - trigger_state(state_t::REPLACING, io_state_t::delay, false); + // mover.socket shouldn't be shutdown + + logger().info("{} start replacing ({}): pgs was {}, cs was {}, " + "client_cookie 
was {}, {}, new_sid={}", + conn, reconnect ? "reconnected" : "connected", + peer_global_seq, connect_seq, client_cookie, + io_states, mover.socket->get_shard_id()); if (is_socket_valid) { - frame_assembler->shutdown_socket(); + frame_assembler->shutdown_socket(&gate); is_socket_valid = false; } + trigger_state_phase1(state_t::REPLACING); gate.dispatch_in_background( "trigger_replacing", conn, @@ -1729,15 +1929,60 @@ void ProtocolV2::trigger_replacing(bool reconnect, new_peer_global_seq, new_connect_seq, new_msg_seq] () mutable { ceph_assert_always(state == state_t::REPLACING); - io_handler.dispatch_accept(); - // state may become CLOSING, close mover.socket and abort later - return wait_exit_io( + auto new_io_shard = mover.socket->get_shard_id(); + // state may become CLOSING below, but we cannot abort the chain until + // mover.socket is correctly handled (closed or replaced). + + // this is preemptive + return wait_switch_io_shard( ).then([this] { + if (unlikely(state != state_t::REPLACING)) { + ceph_assert_always(state == state_t::CLOSING); + return seastar::now(); + } + + trigger_state_phase2(state_t::REPLACING, io_state_t::delay); + return wait_exit_io(); + }).then([this] { + if (unlikely(state != state_t::REPLACING)) { + ceph_assert_always(state == state_t::CLOSING); + return seastar::now(); + } + ceph_assert_always(frame_assembler); protocol_timer.cancel(); auto done = std::move(execution_done); execution_done = seastar::now(); return done; + }).then([this, new_io_shard] { + if (unlikely(state != state_t::REPLACING)) { + ceph_assert_always(state == state_t::CLOSING); + return seastar::now(); + } + + // set io_handler to a new shard + // we should prevent parallel switching core attemps + auto cc_seq = crosscore.prepare_submit(); + // there are 2 hops with dispatch_accept() + crosscore.prepare_submit(); + logger().debug("{} send {} IOHandler::dispatch_accept({})", + conn, cc_seq, new_io_shard); + ConnectionFRef conn_fref = seastar::make_foreign( + conn.shared_from_this()); + ceph_assert_always(!pr_switch_io_shard.has_value()); + pr_switch_io_shard = seastar::shared_promise<>(); + return seastar::smp::submit_to( + io_handler.get_shard_id(), + [this, cc_seq, new_io_shard, + conn_fref=std::move(conn_fref)]() mutable { + return io_handler.dispatch_accept( + cc_seq, new_io_shard, std::move(conn_fref), false); + }).then([this, new_io_shard] { + ceph_assert_always(io_handler.get_shard_id() == new_io_shard); + pr_switch_io_shard->set_value(); + pr_switch_io_shard = std::nullopt; + // user can make changes + }); }).then([this, reconnect, do_reset, @@ -1749,9 +1994,13 @@ void ProtocolV2::trigger_replacing(bool reconnect, new_connect_seq, new_msg_seq] () mutable { if (state == state_t::REPLACING && do_reset) { reset_session(true); + // user can make changes } if (unlikely(state != state_t::REPLACING)) { + logger().debug("{} triggered {} in the middle of trigger_replacing(), abort", + conn, get_state_name(state)); + ceph_assert_always(state == state_t::CLOSING); return mover.socket->close( ).then([sock = std::move(mover.socket)] { abort_protocol(); @@ -1773,9 +2022,21 @@ void ProtocolV2::trigger_replacing(bool reconnect, if (reconnect) { connect_seq = new_connect_seq; // send_reconnect_ok() logic - io_handler.requeue_out_sent_up_to(new_msg_seq); - auto reconnect_ok = ReconnectOkFrame::Encode(io_handler.get_in_seq()); - logger().debug("{} WRITE ReconnectOkFrame: msg_seq={}", conn, io_handler.get_in_seq()); + + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} 
IOHandler::requeue_out_sent_up_to({})", + conn, cc_seq, new_msg_seq); + io_states.requeue_out_sent_up_to(); + gate.dispatch_in_background( + "requeue_out_replacing", conn, [this, cc_seq, new_msg_seq] { + return seastar::smp::submit_to( + io_handler.get_shard_id(), [this, cc_seq, new_msg_seq] { + return io_handler.requeue_out_sent_up_to(cc_seq, new_msg_seq); + }); + }); + + auto reconnect_ok = ReconnectOkFrame::Encode(io_states.in_seq); + logger().debug("{} WRITE ReconnectOkFrame: msg_seq={}", conn, io_states.in_seq); return frame_assembler->write_flush_frame(reconnect_ok); } else { client_cookie = new_client_cookie; @@ -1791,16 +2052,17 @@ void ProtocolV2::trigger_replacing(bool reconnect, } }).then([this, reconnect] { if (unlikely(state != state_t::REPLACING)) { - logger().debug("{} triggered {} at the end of trigger_replacing()", + logger().debug("{} triggered {} at the end of trigger_replacing(), abort", conn, get_state_name(state)); + ceph_assert_always(state == state_t::CLOSING); abort_protocol(); } - logger().info("{} replaced ({}): gs={}, pgs={}, cs={}, " + logger().info("{} replaced ({}), going to ready: " + "gs={}, pgs={}, cs={}, " "client_cookie={}, server_cookie={}, {}", conn, reconnect ? "reconnected" : "connected", global_seq, peer_global_seq, connect_seq, - client_cookie, server_cookie, - io_stat_printer{io_handler}); + client_cookie, server_cookie, io_states); execute_ready(); }).handle_exception([this](std::exception_ptr eptr) { fault(state_t::REPLACING, "trigger_replacing", eptr); @@ -1810,9 +2072,27 @@ void ProtocolV2::trigger_replacing(bool reconnect, // READY state -void ProtocolV2::notify_out_fault(const char *where, std::exception_ptr eptr) +seastar::future<> ProtocolV2::notify_out_fault( + crosscore_t::seq_t cc_seq, + const char *where, + std::exception_ptr eptr, + io_handler_state _io_states) { + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} notify_out_fault(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq, where, eptr, _io_states] { + return notify_out_fault(cc_seq, where, eptr, _io_states); + }); + } + + io_states = _io_states; + logger().debug("{} got {} notify_out_fault(): io_states={}", + conn, cc_seq, io_states); fault(state_t::READY, where, eptr); + return seastar::now(); } void ProtocolV2::execute_ready() @@ -1820,7 +2100,16 @@ void ProtocolV2::execute_ready() assert(conn.policy.lossy || (client_cookie != 0 && server_cookie != 0)); protocol_timer.cancel(); ceph_assert_always(is_socket_valid); - trigger_state(state_t::READY, io_state_t::open, false); + // I'm not responsible to shutdown the socket at READY + is_socket_valid = false; + trigger_state(state_t::READY, io_state_t::open); +#ifdef UNIT_TESTS_BUILT + if (conn.interceptor) { + // FIXME: doesn't support cross-core + conn.interceptor->register_conn_ready( + conn.get_local_shared_foreign_from_this()); + } +#endif } // STANDBY state @@ -1828,16 +2117,31 @@ void ProtocolV2::execute_ready() void ProtocolV2::execute_standby() { ceph_assert_always(!is_socket_valid); - trigger_state(state_t::STANDBY, io_state_t::delay, false); + trigger_state(state_t::STANDBY, io_state_t::delay); } -void ProtocolV2::notify_out() +seastar::future<> ProtocolV2::notify_out( + crosscore_t::seq_t cc_seq) { + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} notify_out(), wait at {}", + conn, 
cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq] { + return notify_out(cc_seq); + }); + } + + logger().debug("{} got {} notify_out(): at {}", + conn, cc_seq, get_state_name(state)); + io_states.is_out_queued = true; if (unlikely(state == state_t::STANDBY && !conn.policy.server)) { logger().info("{} notify_out(): at {}, going to CONNECTING", conn, get_state_name(state)); execute_connecting(); } + return seastar::now(); } // WAIT state @@ -1845,7 +2149,7 @@ void ProtocolV2::notify_out() void ProtocolV2::execute_wait(bool max_backoff) { ceph_assert_always(!is_socket_valid); - trigger_state(state_t::WAIT, io_state_t::delay, false); + trigger_state(state_t::WAIT, io_state_t::delay); gated_execute("execute_wait", conn, [this, max_backoff] { double backoff = protocol_timer.last_dur(); if (max_backoff) { @@ -1883,10 +2187,10 @@ void ProtocolV2::execute_wait(bool max_backoff) void ProtocolV2::execute_server_wait() { ceph_assert_always(is_socket_valid); - trigger_state(state_t::SERVER_WAIT, io_state_t::none, false); + trigger_state(state_t::SERVER_WAIT, io_state_t::none); gated_execute("execute_server_wait", conn, [this] { return frame_assembler->read_exactly(1 - ).then([this](auto bl) { + ).then([this](auto bptr) { logger().warn("{} SERVER_WAIT got read, abort", conn); abort_in_fault(); }).handle_exception([this](std::exception_ptr eptr) { @@ -1905,9 +2209,23 @@ void ProtocolV2::execute_server_wait() // CLOSING state -void ProtocolV2::notify_mark_down() +seastar::future<> ProtocolV2::notify_mark_down( + crosscore_t::seq_t cc_seq) { + assert(seastar::this_shard_id() == conn.get_messenger_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} notify_mark_down(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq] { + return notify_mark_down(cc_seq); + }); + } + + logger().debug("{} got {} notify_mark_down()", + conn, cc_seq); do_close(false); + return seastar::now(); } seastar::future<> ProtocolV2::close_clean_yielded() @@ -1918,22 +2236,21 @@ seastar::future<> ProtocolV2::close_clean_yielded() // the container when seastar::parallel_for_each() is still iterating in it. // that'd lead to a segfault. return seastar::yield( - ).then([this, conn_ref = conn.shared_from_this()] { + ).then([this] { do_close(false); - // it can happen if close_clean() is called inside Dispatcher::ms_handle_reset() - // which will otherwise result in deadlock - assert(closed_clean_fut.valid()); - return closed_clean_fut.get_future(); - }); + return pr_closed_clean.get_shared_future(); + + // connection may be unreferenced from the messenger, + // so need to hold the additional reference. 
+ }).finally([conn_ref = conn.shared_from_this()] {});; } void ProtocolV2::do_close( bool is_dispatch_reset, std::optional> f_accept_new) { - if (closed) { + if (state == state_t::CLOSING) { // already closing - assert(state == state_t::CLOSING); return; } @@ -1946,9 +2263,9 @@ void ProtocolV2::do_close( * atomic operations */ - closed = true; + ceph_assert_always(!gate.is_closed()); - // trigger close + // messenger registrations, must before user events messenger.closing_conn( seastar::static_pointer_cast( conn.shared_from_this())); @@ -1964,48 +2281,67 @@ void ProtocolV2::do_close( // cannot happen ceph_assert(false); } - protocol_timer.cancel(); - trigger_state(state_t::CLOSING, io_state_t::drop, false); - if (f_accept_new) { + // the replacing connection must be registerred after the replaced + // connection is unreigsterred. (*f_accept_new)(); } + + protocol_timer.cancel(); if (is_socket_valid) { - frame_assembler->shutdown_socket(); + frame_assembler->shutdown_socket(&gate); is_socket_valid = false; } - assert(!gate.is_closed()); - auto handshake_closed = gate.close(); - auto io_closed = io_handler.close_io( - is_dispatch_reset, is_replace); - - // asynchronous operations - assert(!closed_clean_fut.valid()); - closed_clean_fut = seastar::when_all( - std::move(handshake_closed), std::move(io_closed) - ).discard_result().then([this] { - ceph_assert_always(!exit_io.has_value()); - if (has_socket) { - ceph_assert_always(frame_assembler); - return frame_assembler->close_shutdown_socket(); - } else { - return seastar::now(); - } - }).then([this] { - logger().debug("{} closed!", conn); - messenger.closed_conn( - seastar::static_pointer_cast( - conn.shared_from_this())); + + trigger_state_phase1(state_t::CLOSING); + gate.dispatch_in_background( + "close_io", conn, [this, is_dispatch_reset, is_replace] { + // this is preemptive + return wait_switch_io_shard( + ).then([this, is_dispatch_reset, is_replace] { + trigger_state_phase2(state_t::CLOSING, io_state_t::drop); + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} IOHandler::close_io(reset={}, replace={})", + conn, cc_seq, is_dispatch_reset, is_replace); + + std::ignore = gate.close( + ).then([this] { + ceph_assert_always(!need_exit_io); + ceph_assert_always(!pr_exit_io.has_value()); + if (has_socket) { + ceph_assert_always(frame_assembler); + return frame_assembler->close_shutdown_socket(); + } else { + return seastar::now(); + } + }).then([this] { + logger().debug("{} closed!", conn); + messenger.closed_conn( + seastar::static_pointer_cast( + conn.shared_from_this())); + pr_closed_clean.set_value(); #ifdef UNIT_TESTS_BUILT - closed_clean = true; - if (conn.interceptor) { - conn.interceptor->register_conn_closed(conn); - } + closed_clean = true; + if (conn.interceptor) { + conn.interceptor->register_conn_closed( + conn.get_local_shared_foreign_from_this()); + } #endif - }).handle_exception([conn_ref = conn.shared_from_this(), this] (auto eptr) { - logger().error("{} closing: closed_clean_fut got unexpected exception {}", - conn, eptr); - ceph_abort(); + // connection is unreferenced from the messenger, + // so need to hold the additional reference. 
+ }).handle_exception([conn_ref = conn.shared_from_this(), this] (auto eptr) { + logger().error("{} closing got unexpected exception {}", + conn, eptr); + ceph_abort(); + }); + + return seastar::smp::submit_to( + io_handler.get_shard_id(), + [this, cc_seq, is_dispatch_reset, is_replace] { + return io_handler.close_io(cc_seq, is_dispatch_reset, is_replace); + }); + // user can make changes + }); }); } diff --git a/ceph/src/crimson/net/ProtocolV2.h b/ceph/src/crimson/net/ProtocolV2.h index b6f67b566..dd7a1e703 100644 --- a/ceph/src/crimson/net/ProtocolV2.h +++ b/ceph/src/crimson/net/ProtocolV2.h @@ -28,11 +28,17 @@ public: * as HandshakeListener */ private: - void notify_out() final; + seastar::future<> notify_out( + crosscore_t::seq_t cc_seq) final; - void notify_out_fault(const char *, std::exception_ptr) final; + seastar::future<> notify_out_fault( + crosscore_t::seq_t cc_seq, + const char *where, + std::exception_ptr, + io_handler_state) final; - void notify_mark_down() final; + seastar::future<> notify_mark_down( + crosscore_t::seq_t cc_seq) final; /* * as ProtocolV2 to be called by SocketConnection @@ -41,26 +47,45 @@ public: void start_connect(const entity_addr_t& peer_addr, const entity_name_t& peer_name); - void start_accept(SocketRef&& socket, + void start_accept(SocketFRef&& socket, const entity_addr_t& peer_addr); seastar::future<> close_clean_yielded(); #ifdef UNIT_TESTS_BUILT + bool is_ready() const { + return state == state_t::READY; + } + + bool is_standby() const { + return state == state_t::STANDBY; + } + bool is_closed_clean() const { return closed_clean; } bool is_closed() const { - return closed; + return state == state_t::CLOSING; } #endif private: + using io_state_t = IOHandler::io_state_t; + + seastar::future<> wait_switch_io_shard() { + if (pr_switch_io_shard.has_value()) { + return pr_switch_io_shard->get_shared_future(); + } else { + return seastar::now(); + } + } + seastar::future<> wait_exit_io() { - if (exit_io.has_value()) { - return exit_io->get_shared_future(); + if (pr_exit_io.has_value()) { + return pr_exit_io->get_shared_future(); } else { + assert(!need_exit_io); return seastar::now(); } } @@ -92,7 +117,15 @@ private: return statenames[static_cast(state)]; } - void trigger_state(state_t state, IOHandler::io_state_t io_state, bool reentrant); + void trigger_state_phase1(state_t new_state); + + void trigger_state_phase2(state_t new_state, io_state_t new_io_state); + + void trigger_state(state_t new_state, io_state_t new_io_state) { + ceph_assert_always(!pr_switch_io_shard.has_value()); + trigger_state_phase1(new_state); + trigger_state_phase2(new_state, new_io_state); + } template void gated_execute(const char *what, T &who, Func &&func) { @@ -215,6 +248,11 @@ private: IOHandler &io_handler; + // asynchronously populated from io_handler + io_handler_state io_states; + + crosscore_t crosscore; + bool has_socket = false; // the socket exists and it is not shutdown @@ -222,16 +260,19 @@ private: FrameAssemblerV2Ref frame_assembler; - std::optional> exit_io; + bool need_notify_out = false; + + std::optional> pr_switch_io_shard; + + bool need_exit_io = false; + + std::optional> pr_exit_io; AuthConnectionMetaRef auth_meta; crimson::common::Gated gate; - bool closed = false; - - // become valid only after closed == true - seastar::shared_future<> closed_clean_fut; + seastar::shared_promise<> pr_closed_clean; #ifdef UNIT_TESTS_BUILT bool closed_clean = false; diff --git a/ceph/src/crimson/net/Socket.cc b/ceph/src/crimson/net/Socket.cc index 6434a407f..95b1e2250 
100644 --- a/ceph/src/crimson/net/Socket.cc +++ b/ceph/src/crimson/net/Socket.cc @@ -5,6 +5,7 @@ #include #include +#include #include "crimson/common/log.h" #include "Errors.h" @@ -19,6 +20,9 @@ seastar::logger& logger() { return crimson::get_logger(ceph_subsys_ms); } +using tmp_buf = seastar::temporary_buffer; +using packet = seastar::net::packet; + // an input_stream consumer that reads buffer segments into a bufferlist up to // the given number of remaining bytes struct bufferlist_consumer { @@ -28,7 +32,6 @@ struct bufferlist_consumer { bufferlist_consumer(bufferlist& bl, size_t& remaining) : bl(bl), remaining(remaining) {} - using tmp_buf = seastar::temporary_buffer; using consumption_result_type = typename seastar::input_stream::consumption_result_type; // consume some or all of a buffer segment @@ -59,10 +62,64 @@ struct bufferlist_consumer { }; }; +seastar::future<> inject_delay() +{ + if (float delay_period = local_conf()->ms_inject_internal_delays; + delay_period) { + logger().debug("Socket::inject_delay: sleep for {}", delay_period); + return seastar::sleep( + std::chrono::milliseconds((int)(delay_period * 1000.0))); + } + return seastar::now(); +} + +void inject_failure() +{ + if (local_conf()->ms_inject_socket_failures) { + uint64_t rand = + ceph::util::generate_random_number(1, RAND_MAX); + if (rand % local_conf()->ms_inject_socket_failures == 0) { + logger().warn("Socket::inject_failure: injecting socket failure"); + throw std::system_error(make_error_code( + error::negotiation_failure)); + } + } +} + } // anonymous namespace -seastar::future Socket::read(size_t bytes) +Socket::Socket( + seastar::connected_socket &&_socket, + side_t _side, + uint16_t e_port, + construct_tag) + : sid{seastar::this_shard_id()}, + socket(std::move(_socket)), + in(socket.input()), + // the default buffer size 8192 is too small that may impact our write + // performance. 
see seastar::net::connected_socket::output() + out(socket.output(65536)), + socket_is_shutdown(false), + side(_side), + ephemeral_port(e_port) { + if (local_conf()->ms_tcp_nodelay) { + socket.set_nodelay(true); + } +} + +Socket::~Socket() +{ + assert(seastar::this_shard_id() == sid); +#ifndef NDEBUG + assert(closed); +#endif +} + +seastar::future +Socket::read(size_t bytes) +{ + assert(seastar::this_shard_id() == sid); #ifdef UNIT_TESTS_BUILT return try_trap_pre(next_trap_read).then([bytes, this] { #endif @@ -81,44 +138,103 @@ seastar::future Socket::read(size_t bytes) }); }); #ifdef UNIT_TESTS_BUILT - }).then([this] (auto buf) { + }).then([this](auto buf) { return try_trap_post(next_trap_read - ).then([buf = std::move(buf)] () mutable { + ).then([buf = std::move(buf)]() mutable { return std::move(buf); }); }); #endif } -seastar::future> +seastar::future Socket::read_exactly(size_t bytes) { + assert(seastar::this_shard_id() == sid); #ifdef UNIT_TESTS_BUILT return try_trap_pre(next_trap_read).then([bytes, this] { #endif if (bytes == 0) { - return seastar::make_ready_future>(); + return seastar::make_ready_future(); } return in.read_exactly(bytes).then([bytes](auto buf) { - if (buf.size() < bytes) { + bufferptr ptr(buffer::create(buf.share())); + if (ptr.length() < bytes) { throw std::system_error(make_error_code(error::read_eof)); } inject_failure(); return inject_delay( - ).then([buf = std::move(buf)] () mutable { - return seastar::make_ready_future(std::move(buf)); + ).then([ptr = std::move(ptr)]() mutable { + return seastar::make_ready_future(std::move(ptr)); }); }); #ifdef UNIT_TESTS_BUILT - }).then([this] (auto buf) { + }).then([this](auto ptr) { return try_trap_post(next_trap_read - ).then([buf = std::move(buf)] () mutable { - return std::move(buf); + ).then([ptr = std::move(ptr)]() mutable { + return std::move(ptr); }); }); #endif } -void Socket::shutdown() { +seastar::future<> +Socket::write(bufferlist buf) +{ + assert(seastar::this_shard_id() == sid); +#ifdef UNIT_TESTS_BUILT + return try_trap_pre(next_trap_write + ).then([buf = std::move(buf), this]() mutable { +#endif + inject_failure(); + return inject_delay( + ).then([buf = std::move(buf), this]() mutable { + packet p(std::move(buf)); + return out.write(std::move(p)); + }); +#ifdef UNIT_TESTS_BUILT + }).then([this] { + return try_trap_post(next_trap_write); + }); +#endif +} + +seastar::future<> +Socket::flush() +{ + assert(seastar::this_shard_id() == sid); + inject_failure(); + return inject_delay().then([this] { + return out.flush(); + }); +} + +seastar::future<> +Socket::write_flush(bufferlist buf) +{ + assert(seastar::this_shard_id() == sid); +#ifdef UNIT_TESTS_BUILT + return try_trap_pre(next_trap_write + ).then([buf = std::move(buf), this]() mutable { +#endif + inject_failure(); + return inject_delay( + ).then([buf = std::move(buf), this]() mutable { + packet p(std::move(buf)); + return out.write(std::move(p) + ).then([this] { + return out.flush(); + }); + }); +#ifdef UNIT_TESTS_BUILT + }).then([this] { + return try_trap_post(next_trap_write); + }); +#endif +} + +void Socket::shutdown() +{ + assert(seastar::this_shard_id() == sid); socket_is_shutdown = true; socket.shutdown_input(); socket.shutdown_output(); @@ -127,19 +243,22 @@ void Socket::shutdown() { static inline seastar::future<> close_and_handle_errors(seastar::output_stream& out) { - return out.close().handle_exception_type([] (const std::system_error& e) { + return out.close().handle_exception_type([](const std::system_error& e) { if (e.code() != 
std::errc::broken_pipe && e.code() != std::errc::connection_reset) { - logger().error("Socket::close(): unexpected error {}", e); + logger().error("Socket::close(): unexpected error {}", e.what()); ceph_abort(); } // can happen when out is already shutdown, ignore }); } -seastar::future<> Socket::close() { +seastar::future<> +Socket::close() +{ + assert(seastar::this_shard_id() == sid); #ifndef NDEBUG - ceph_assert(!closed); + ceph_assert_always(!closed); closed = true; #endif return seastar::when_all_succeed( @@ -148,39 +267,55 @@ seastar::future<> Socket::close() { close_and_handle_errors(out) ).then_unpack([] { return seastar::make_ready_future<>(); - }).handle_exception([] (auto eptr) { - logger().error("Socket::close(): unexpected exception {}", eptr); + }).handle_exception([](auto eptr) { + const char *e_what; + try { + std::rethrow_exception(eptr); + } catch (std::exception &e) { + e_what = e.what(); + } + logger().error("Socket::close(): unexpected exception {}", e_what); ceph_abort(); }); } -seastar::future<> Socket::inject_delay () { - if (float delay_period = local_conf()->ms_inject_internal_delays; - delay_period) { - logger().debug("Socket::inject_delay: sleep for {}", delay_period); - return seastar::sleep( - std::chrono::milliseconds((int)(delay_period * 1000.0))); - } - return seastar::now(); +seastar::future +Socket::connect(const entity_addr_t &peer_addr) +{ + inject_failure(); + return inject_delay( + ).then([peer_addr] { + return seastar::connect(peer_addr.in4_addr()); + }).then([peer_addr](seastar::connected_socket socket) { + auto ret = std::make_unique( + std::move(socket), side_t::connector, 0, construct_tag{}); + logger().debug("Socket::connect(): connected to {}, socket {}", + peer_addr, fmt::ptr(ret)); + return ret; + }); } -void Socket::inject_failure() -{ - if (local_conf()->ms_inject_socket_failures) { - uint64_t rand = - ceph::util::generate_random_number(1, RAND_MAX); - if (rand % local_conf()->ms_inject_socket_failures == 0) { - if (true) { - logger().warn("Socket::inject_failure: injecting socket failure"); - throw std::system_error(make_error_code( - crimson::net::error::negotiation_failure)); - } +#ifdef UNIT_TESTS_BUILT +void Socket::set_trap(bp_type_t type, bp_action_t action, socket_blocker* blocker_) { + assert(seastar::this_shard_id() == sid); + blocker = blocker_; + if (type == bp_type_t::READ) { + ceph_assert_always(next_trap_read == bp_action_t::CONTINUE); + next_trap_read = action; + } else { // type == bp_type_t::WRITE + if (next_trap_write == bp_action_t::CONTINUE) { + next_trap_write = action; + } else if (next_trap_write == bp_action_t::FAULT) { + // do_sweep_messages() may combine multiple write events into one socket write + ceph_assert_always(action == bp_action_t::FAULT || action == bp_action_t::CONTINUE); + } else { + ceph_abort(); } } } -#ifdef UNIT_TESTS_BUILT -seastar::future<> Socket::try_trap_pre(bp_action_t& trap) { +seastar::future<> +Socket::try_trap_pre(bp_action_t& trap) { auto action = trap; trap = bp_action_t::CONTINUE; switch (action) { @@ -188,7 +323,7 @@ seastar::future<> Socket::try_trap_pre(bp_action_t& trap) { break; case bp_action_t::FAULT: logger().info("[Test] got FAULT"); - throw std::system_error(make_error_code(crimson::net::error::negotiation_failure)); + throw std::system_error(make_error_code(error::negotiation_failure)); case bp_action_t::BLOCK: logger().info("[Test] got BLOCK"); return blocker->block(); @@ -201,7 +336,8 @@ seastar::future<> Socket::try_trap_pre(bp_action_t& trap) { return 
seastar::make_ready_future<>(); } -seastar::future<> Socket::try_trap_post(bp_action_t& trap) { +seastar::future<> +Socket::try_trap_post(bp_action_t& trap) { auto action = trap; trap = bp_action_t::CONTINUE; switch (action) { @@ -216,94 +352,170 @@ seastar::future<> Socket::try_trap_post(bp_action_t& trap) { } return seastar::make_ready_future<>(); } +#endif -void Socket::set_trap(bp_type_t type, bp_action_t action, socket_blocker* blocker_) { - blocker = blocker_; - if (type == bp_type_t::READ) { - ceph_assert(next_trap_read == bp_action_t::CONTINUE); - next_trap_read = action; - } else { // type == bp_type_t::WRITE - if (next_trap_write == bp_action_t::CONTINUE) { - next_trap_write = action; - } else if (next_trap_write == bp_action_t::FAULT) { - // do_sweep_messages() may combine multiple write events into one socket write - ceph_assert(action == bp_action_t::FAULT || action == bp_action_t::CONTINUE); - } else { - ceph_abort(); - } - } +ShardedServerSocket::ShardedServerSocket( + seastar::shard_id sid, + bool dispatch_only_on_primary_sid, + construct_tag) + : primary_sid{sid}, dispatch_only_on_primary_sid{dispatch_only_on_primary_sid} +{ +} + +ShardedServerSocket::~ShardedServerSocket() +{ + assert(!listener); + // detect whether user have called destroy() properly + ceph_assert_always(!service); } -#endif -crimson::net::listen_ertr::future<> -FixedCPUServerSocket::listen(entity_addr_t addr) +listen_ertr::future<> +ShardedServerSocket::listen(entity_addr_t addr) { - assert(seastar::this_shard_id() == cpu); - logger().trace("FixedCPUServerSocket::listen({})...", addr); - return container().invoke_on_all([addr] (auto& ss) { - ss.addr = addr; + ceph_assert_always(seastar::this_shard_id() == primary_sid); + logger().debug("ShardedServerSocket({})::listen()...", addr); + return this->container().invoke_on_all([addr](auto& ss) { + ss.listen_addr = addr; seastar::socket_address s_addr(addr.in4_addr()); seastar::listen_options lo; lo.reuse_address = true; - lo.set_fixed_cpu(ss.cpu); + if (ss.dispatch_only_on_primary_sid) { + lo.set_fixed_cpu(ss.primary_sid); + } ss.listener = seastar::listen(s_addr, lo); }).then([] { return listen_ertr::now(); }).handle_exception_type( - [addr] (const std::system_error& e) -> listen_ertr::future<> { + [addr](const std::system_error& e) -> listen_ertr::future<> { if (e.code() == std::errc::address_in_use) { - logger().trace("FixedCPUServerSocket::listen({}): address in use", addr); + logger().debug("ShardedServerSocket({})::listen(): address in use", addr); return crimson::ct_error::address_in_use::make(); } else if (e.code() == std::errc::address_not_available) { - logger().trace("FixedCPUServerSocket::listen({}): address not available", + logger().debug("ShardedServerSocket({})::listen(): address not available", addr); return crimson::ct_error::address_not_available::make(); } - logger().error("FixedCPUServerSocket::listen({}): " - "got unexpeted error {}", addr, e); + logger().error("ShardedServerSocket({})::listen(): " + "got unexpeted error {}", addr, e.what()); ceph_abort(); }); } -seastar::future<> FixedCPUServerSocket::shutdown() +seastar::future<> +ShardedServerSocket::accept(accept_func_t &&_fn_accept) { - assert(seastar::this_shard_id() == cpu); - logger().trace("FixedCPUServerSocket({})::shutdown()...", addr); - return container().invoke_on_all([] (auto& ss) { + ceph_assert_always(seastar::this_shard_id() == primary_sid); + logger().debug("ShardedServerSocket({})::accept()...", listen_addr); + return this->container().invoke_on_all([_fn_accept](auto 
&ss) { + assert(ss.listener); + ss.fn_accept = _fn_accept; + // gate accepting + // ShardedServerSocket::shutdown() will drain the continuations in the gate + // so ignore the returned future + std::ignore = seastar::with_gate(ss.shutdown_gate, [&ss] { + return seastar::keep_doing([&ss] { + return ss.listener->accept( + ).then([&ss](seastar::accept_result accept_result) { +#ifndef NDEBUG + if (ss.dispatch_only_on_primary_sid) { + // see seastar::listen_options::set_fixed_cpu() + ceph_assert_always(seastar::this_shard_id() == ss.primary_sid); + } +#endif + auto [socket, paddr] = std::move(accept_result); + entity_addr_t peer_addr; + peer_addr.set_sockaddr(&paddr.as_posix_sockaddr()); + peer_addr.set_type(ss.listen_addr.get_type()); + SocketRef _socket = std::make_unique( + std::move(socket), Socket::side_t::acceptor, + peer_addr.get_port(), Socket::construct_tag{}); + logger().debug("ShardedServerSocket({})::accept(): accepted peer {}, " + "socket {}, dispatch_only_on_primary_sid = {}", + ss.listen_addr, peer_addr, fmt::ptr(_socket), + ss.dispatch_only_on_primary_sid); + std::ignore = seastar::with_gate( + ss.shutdown_gate, + [socket=std::move(_socket), peer_addr, &ss]() mutable { + return ss.fn_accept(std::move(socket), peer_addr + ).handle_exception([&ss, peer_addr](auto eptr) { + const char *e_what; + try { + std::rethrow_exception(eptr); + } catch (std::exception &e) { + e_what = e.what(); + } + logger().error("ShardedServerSocket({})::accept(): " + "fn_accept(s, {}) got unexpected exception {}", + ss.listen_addr, peer_addr, e_what); + ceph_abort(); + }); + }); + }); + }).handle_exception_type([&ss](const std::system_error& e) { + if (e.code() == std::errc::connection_aborted || + e.code() == std::errc::invalid_argument) { + logger().debug("ShardedServerSocket({})::accept(): stopped ({})", + ss.listen_addr, e.what()); + } else { + throw; + } + }).handle_exception([&ss](auto eptr) { + const char *e_what; + try { + std::rethrow_exception(eptr); + } catch (std::exception &e) { + e_what = e.what(); + } + logger().error("ShardedServerSocket({})::accept(): " + "got unexpected exception {}", ss.listen_addr, e_what); + ceph_abort(); + }); + }); + }); +} + +seastar::future<> +ShardedServerSocket::shutdown_destroy() +{ + assert(seastar::this_shard_id() == primary_sid); + logger().debug("ShardedServerSocket({})::shutdown_destroy()...", listen_addr); + // shutdown shards + return this->container().invoke_on_all([](auto& ss) { if (ss.listener) { ss.listener->abort_accept(); } return ss.shutdown_gate.close(); }).then([this] { - return reset(); - }); -} - -seastar::future<> FixedCPUServerSocket::destroy() -{ - assert(seastar::this_shard_id() == cpu); - return shutdown().then([this] { - // we should only construct/stop shards on #0 - return container().invoke_on(0, [] (auto& ss) { + // destroy shards + return this->container().invoke_on_all([](auto& ss) { + assert(ss.shutdown_gate.is_closed()); + ss.listen_addr = entity_addr_t(); + ss.listener.reset(); + }); + }).then([this] { + // stop the sharded service: we should only construct/stop shards on #0 + return this->container().invoke_on(0, [](auto& ss) { assert(ss.service); return ss.service->stop().finally([cleanup = std::move(ss.service)] {}); }); }); } -seastar::future FixedCPUServerSocket::create() +seastar::future +ShardedServerSocket::create(bool dispatch_only_on_this_shard) { - auto cpu = seastar::this_shard_id(); - // we should only construct/stop shards on #0 - return seastar::smp::submit_to(0, [cpu] { + auto primary_sid = 
seastar::this_shard_id(); + // start the sharded service: we should only construct/stop shards on #0 + return seastar::smp::submit_to(0, [primary_sid, dispatch_only_on_this_shard] { auto service = std::make_unique(); - return service->start(cpu, construct_tag{} - ).then([service = std::move(service)] () mutable { + return service->start( + primary_sid, dispatch_only_on_this_shard, construct_tag{} + ).then([service = std::move(service)]() mutable { auto p_shard = service.get(); p_shard->local().service = std::move(service); return p_shard; }); - }).then([] (auto p_shard) { + }).then([](auto p_shard) { return &p_shard->local(); }); } diff --git a/ceph/src/crimson/net/Socket.h b/ceph/src/crimson/net/Socket.h index b6125eb8a..478f2d630 100644 --- a/ceph/src/crimson/net/Socket.h +++ b/ceph/src/crimson/net/Socket.h @@ -6,7 +6,6 @@ #include #include #include -#include #include "include/buffer.h" @@ -22,146 +21,98 @@ namespace crimson::net { class Socket; using SocketRef = std::unique_ptr; +using SocketFRef = seastar::foreign_ptr; -class Socket -{ +class Socket { struct construct_tag {}; - public: +public: // if acceptor side, peer is using a different port (ephemeral_port) // if connector side, I'm using a different port (ephemeral_port) enum class side_t { acceptor, connector }; + Socket(seastar::connected_socket &&, side_t, uint16_t e_port, construct_tag); - Socket(seastar::connected_socket&& _socket, side_t _side, uint16_t e_port, construct_tag) - : sid{seastar::this_shard_id()}, - socket(std::move(_socket)), - in(socket.input()), - // the default buffer size 8192 is too small that may impact our write - // performance. see seastar::net::connected_socket::output() - out(socket.output(65536)), - socket_is_shutdown(false), - side(_side), - ephemeral_port(e_port) {} - - ~Socket() { -#ifndef NDEBUG - assert(closed); -#endif - } + ~Socket(); Socket(Socket&& o) = delete; - static seastar::future - connect(const entity_addr_t& peer_addr) { - inject_failure(); - return inject_delay( - ).then([peer_addr] { - return seastar::connect(peer_addr.in4_addr()); - }).then([] (seastar::connected_socket socket) { - return std::make_unique( - std::move(socket), side_t::connector, 0, construct_tag{}); - }); + seastar::shard_id get_shard_id() const { + return sid; } - /// read the requested number of bytes into a bufferlist - seastar::future read(size_t bytes); - using tmp_buf = seastar::temporary_buffer; - using packet = seastar::net::packet; - seastar::future read_exactly(size_t bytes); - - seastar::future<> write(packet&& buf) { -#ifdef UNIT_TESTS_BUILT - return try_trap_pre(next_trap_write - ).then([buf = std::move(buf), this] () mutable { -#endif - inject_failure(); - return inject_delay( - ).then([buf = std::move(buf), this] () mutable { - return out.write(std::move(buf)); - }); -#ifdef UNIT_TESTS_BUILT - }).then([this] { - return try_trap_post(next_trap_write); - }); -#endif + side_t get_side() const { + return side; } - seastar::future<> flush() { - inject_failure(); - return inject_delay().then([this] { - return out.flush(); - }); + + uint16_t get_ephemeral_port() const { + return ephemeral_port; } - seastar::future<> write_flush(packet&& buf) { -#ifdef UNIT_TESTS_BUILT - return try_trap_pre(next_trap_write).then([buf = std::move(buf), this] () mutable { -#endif - inject_failure(); - return inject_delay( - ).then([buf = std::move(buf), this] () mutable { - return out.write(std::move(buf)).then([this] { return out.flush(); }); - }); -#ifdef UNIT_TESTS_BUILT - }).then([this] { - return 
try_trap_post(next_trap_write); - }); -#endif + + seastar::socket_address get_local_address() const { + return socket.local_address(); } bool is_shutdown() const { + assert(seastar::this_shard_id() == sid); return socket_is_shutdown; } + // learn my ephemeral_port as connector. + // unfortunately, there's no way to identify which port I'm using as + // connector with current seastar interface. + void learn_ephemeral_port_as_connector(uint16_t port) { + assert(side == side_t::connector && + (ephemeral_port == 0 || ephemeral_port == port)); + ephemeral_port = port; + } + + /// read the requested number of bytes into a bufferlist + seastar::future read(size_t bytes); + + seastar::future read_exactly(size_t bytes); + + seastar::future<> write(bufferlist); + + seastar::future<> flush(); + + seastar::future<> write_flush(bufferlist); + // preemptively disable further reads or writes, can only be shutdown once. void shutdown(); /// Socket can only be closed once. seastar::future<> close(); - static seastar::future<> inject_delay(); + static seastar::future + connect(const entity_addr_t& peer_addr); - static void inject_failure(); + /* + * test interfaces + */ // shutdown for tests void force_shutdown() { + assert(seastar::this_shard_id() == sid); socket.shutdown_input(); socket.shutdown_output(); } // shutdown input_stream only, for tests void force_shutdown_in() { + assert(seastar::this_shard_id() == sid); socket.shutdown_input(); } // shutdown output_stream only, for tests void force_shutdown_out() { + assert(seastar::this_shard_id() == sid); socket.shutdown_output(); } - side_t get_side() const { - return side; - } - - uint16_t get_ephemeral_port() const { - return ephemeral_port; - } - - // learn my ephemeral_port as connector. - // unfortunately, there's no way to identify which port I'm using as - // connector with current seastar interface. 
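
// The Socket interface above now takes bufferlist for write() and
// write_flush(): write() appends to the connection's buffered output stream,
// flush() pushes whatever is buffered to the kernel, and write_flush() does
// both for the last fragment of a batch.  Below is a minimal, standalone
// sketch of that calling convention using only the standard library;
// BufferedWriter, its sink type and the 64 KiB threshold are illustrative
// stand-ins, not crimson code.

#include <cstddef>
#include <iostream>
#include <string>

class BufferedWriter {
public:
  explicit BufferedWriter(std::ostream &sink, std::size_t limit = 64 * 1024)
    : sink_(sink), limit_(limit) {}

  // write(): queue bytes; only spill to the sink once the buffer fills up.
  void write(const std::string &bytes) {
    buffer_ += bytes;
    if (buffer_.size() >= limit_) {
      flush();
    }
  }

  // flush(): push everything buffered so far to the sink.
  void flush() {
    sink_ << buffer_;
    sink_.flush();
    buffer_.clear();
  }

  // write_flush(): write the final fragment of a batch and flush immediately,
  // mirroring the write()/flush()/write_flush() split declared above.
  void write_flush(const std::string &bytes) {
    write(bytes);
    flush();
  }

private:
  std::ostream &sink_;
  std::size_t limit_;
  std::string buffer_;
};

int main() {
  BufferedWriter w(std::cout);
  w.write("frame 1 ");
  w.write("frame 2 ");        // still buffered
  w.write_flush("frame 3\n"); // batch boundary: everything goes out together
  return 0;
}
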
- void learn_ephemeral_port_as_connector(uint16_t port) { - assert(side == side_t::connector && - (ephemeral_port == 0 || ephemeral_port == port)); - ephemeral_port = port; - } - - seastar::socket_address get_local_address() const { - return socket.local_address(); - } - - private: +private: const seastar::shard_id sid; seastar::connected_socket socket; seastar::input_stream in; @@ -181,18 +132,20 @@ class Socket } r; #ifdef UNIT_TESTS_BUILT - public: +public: void set_trap(bp_type_t type, bp_action_t action, socket_blocker* blocker_); - private: +private: + seastar::future<> try_trap_pre(bp_action_t& trap); + + seastar::future<> try_trap_post(bp_action_t& trap); + bp_action_t next_trap_read = bp_action_t::CONTINUE; bp_action_t next_trap_write = bp_action_t::CONTINUE; socket_blocker* blocker = nullptr; - seastar::future<> try_trap_pre(bp_action_t& trap); - seastar::future<> try_trap_post(bp_action_t& trap); #endif - friend class FixedCPUServerSocket; + friend class ShardedServerSocket; }; using listen_ertr = crimson::errorator< @@ -200,105 +153,49 @@ using listen_ertr = crimson::errorator< crimson::ct_error::address_not_available // https://techoverflow.net/2021/08/06/how-i-fixed-python-oserror-errno-99-cannot-assign-requested-address/ >; -class FixedCPUServerSocket - : public seastar::peering_sharded_service { - const seastar::shard_id cpu; - entity_addr_t addr; - std::optional listener; - seastar::gate shutdown_gate; +class ShardedServerSocket + : public seastar::peering_sharded_service { + struct construct_tag {}; - using sharded_service_t = seastar::sharded; - std::unique_ptr service; +public: + ShardedServerSocket( + seastar::shard_id sid, + bool dispatch_only_on_primary_sid, + construct_tag); - struct construct_tag {}; + ~ShardedServerSocket(); - static seastar::logger& logger() { - return crimson::get_logger(ceph_subsys_ms); - } + ShardedServerSocket(ShardedServerSocket&&) = delete; + ShardedServerSocket(const ShardedServerSocket&) = delete; + ShardedServerSocket& operator=(ShardedServerSocket&&) = delete; + ShardedServerSocket& operator=(const ShardedServerSocket&) = delete; - seastar::future<> reset() { - return container().invoke_on_all([] (auto& ss) { - assert(ss.shutdown_gate.is_closed()); - ss.addr = entity_addr_t(); - ss.listener.reset(); - }); + bool is_fixed_shard_dispatching() const { + return dispatch_only_on_primary_sid; } -public: - FixedCPUServerSocket(seastar::shard_id cpu, construct_tag) : cpu{cpu} {} - ~FixedCPUServerSocket() { - assert(!listener); - // detect whether user have called destroy() properly - ceph_assert(!service); - } + listen_ertr::future<> listen(entity_addr_t addr); - FixedCPUServerSocket(FixedCPUServerSocket&&) = delete; - FixedCPUServerSocket(const FixedCPUServerSocket&) = delete; - FixedCPUServerSocket& operator=(const FixedCPUServerSocket&) = delete; + using accept_func_t = + std::function(SocketRef, entity_addr_t)>; + seastar::future<> accept(accept_func_t &&_fn_accept); - listen_ertr::future<> listen(entity_addr_t addr); + seastar::future<> shutdown_destroy(); - // fn_accept should be a nothrow function of type - // seastar::future<>(SocketRef, entity_addr_t) - template - seastar::future<> accept(Func&& fn_accept) { - assert(seastar::this_shard_id() == cpu); - logger().trace("FixedCPUServerSocket({})::accept()...", addr); - return container().invoke_on_all( - [fn_accept = std::move(fn_accept)] (auto& ss) mutable { - assert(ss.listener); - // gate accepting - // FixedCPUServerSocket::shutdown() will drain the continuations in the gate - // so 
ignore the returned future - std::ignore = seastar::with_gate(ss.shutdown_gate, - [&ss, fn_accept = std::move(fn_accept)] () mutable { - return seastar::keep_doing([&ss, fn_accept = std::move(fn_accept)] () mutable { - return ss.listener->accept().then( - [&ss, fn_accept = std::move(fn_accept)] - (seastar::accept_result accept_result) mutable { - // assert seastar::listen_options::set_fixed_cpu() works - assert(seastar::this_shard_id() == ss.cpu); - auto [socket, paddr] = std::move(accept_result); - entity_addr_t peer_addr; - peer_addr.set_sockaddr(&paddr.as_posix_sockaddr()); - peer_addr.set_type(ss.addr.get_type()); - SocketRef _socket = std::make_unique( - std::move(socket), Socket::side_t::acceptor, - peer_addr.get_port(), Socket::construct_tag{}); - std::ignore = seastar::with_gate(ss.shutdown_gate, - [socket = std::move(_socket), peer_addr, - &ss, fn_accept = std::move(fn_accept)] () mutable { - logger().trace("FixedCPUServerSocket({})::accept(): " - "accepted peer {}", ss.addr, peer_addr); - return fn_accept(std::move(socket), peer_addr - ).handle_exception([&ss, peer_addr] (auto eptr) { - logger().error("FixedCPUServerSocket({})::accept(): " - "fn_accept(s, {}) got unexpected exception {}", - ss.addr, peer_addr, eptr); - ceph_abort(); - }); - }); - }); - }).handle_exception_type([&ss] (const std::system_error& e) { - if (e.code() == std::errc::connection_aborted || - e.code() == std::errc::invalid_argument) { - logger().trace("FixedCPUServerSocket({})::accept(): stopped ({})", - ss.addr, e); - } else { - throw; - } - }).handle_exception([&ss] (auto eptr) { - logger().error("FixedCPUServerSocket({})::accept(): " - "got unexpected exception {}", ss.addr, eptr); - ceph_abort(); - }); - }); - }); - } + static seastar::future create( + bool dispatch_only_on_this_shard); - seastar::future<> shutdown(); - seastar::future<> destroy(); - static seastar::future create(); +private: + const seastar::shard_id primary_sid; + /// XXX: Remove once all infrastructure uses multi-core messenger + const bool dispatch_only_on_primary_sid; + entity_addr_t listen_addr; + std::optional listener; + seastar::gate shutdown_gate; + accept_func_t fn_accept; + + using sharded_service_t = seastar::sharded; + std::unique_ptr service; }; } // namespace crimson::net diff --git a/ceph/src/crimson/net/SocketConnection.cc b/ceph/src/crimson/net/SocketConnection.cc index 38e274873..57e5c12c1 100644 --- a/ceph/src/crimson/net/SocketConnection.cc +++ b/ceph/src/crimson/net/SocketConnection.cc @@ -28,8 +28,7 @@ namespace crimson::net { SocketConnection::SocketConnection(SocketMessenger& messenger, ChainedDispatchers& dispatchers) - : core(messenger.shard_id()), - messenger(messenger) + : msgr_sid{messenger.get_shard_id()}, messenger(messenger) { auto ret = create_handlers(dispatchers, *this); io_handler = std::move(ret.io_handler); @@ -37,7 +36,7 @@ SocketConnection::SocketConnection(SocketMessenger& messenger, #ifdef UNIT_TESTS_BUILT if (messenger.interceptor) { interceptor = messenger.interceptor; - interceptor->register_conn(*this); + interceptor->register_conn(this->get_local_shared_foreign_from_this()); } #endif } @@ -46,45 +45,51 @@ SocketConnection::~SocketConnection() {} bool SocketConnection::is_connected() const { - assert(seastar::this_shard_id() == shard_id()); return io_handler->is_connected(); } #ifdef UNIT_TESTS_BUILT -bool SocketConnection::is_closed() const +bool SocketConnection::is_protocol_ready() const { - assert(seastar::this_shard_id() == shard_id()); + assert(seastar::this_shard_id() == msgr_sid); 
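
// SocketConnection::send() and send_keepalive() above may now be invoked from
// any core: IOHandler compares seastar::this_shard_id() against the owning
// shard and either runs inline or forwards the call with
// seastar::smp::submit_to(), re-checking after the hop because the owner may
// change on the fly.  The standalone sketch below models that "run locally or
// post to the owner" rule with plain task queues standing in for seastar
// shards; the names (submit_to, current_shard, Connection) are illustrative,
// not the crimson implementation.

#include <deque>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

using Task = std::function<void()>;

static std::vector<std::deque<Task>> queues(2); // two pretend shards
static unsigned current_shard = 0;

// Post work onto another shard's queue instead of running it here.
static void submit_to(unsigned shard, Task t) {
  queues[shard].push_back(std::move(t));
}

struct Connection {
  unsigned owner_shard;

  void send(const std::string &msg) {
    if (current_shard == owner_shard) {
      std::cout << "shard " << current_shard << " sends: " << msg << "\n";
    } else {
      // Not on the owning shard: forward the call, which re-checks the owner
      // once it runs there (the send_redirected() pattern above).
      submit_to(owner_shard, [this, msg] { send(msg); });
    }
  }
};

int main() {
  Connection c{1};
  c.send("ping");          // called from shard 0, queued for shard 1
  current_shard = 1;       // pretend shard 1 now drains its queue
  while (!queues[1].empty()) {
    Task t = std::move(queues[1].front());
    queues[1].pop_front();
    t();
  }
  return 0;
}
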
+ return protocol->is_ready(); +} + +bool SocketConnection::is_protocol_standby() const { + assert(seastar::this_shard_id() == msgr_sid); + return protocol->is_standby(); +} + +bool SocketConnection::is_protocol_closed() const +{ + assert(seastar::this_shard_id() == msgr_sid); return protocol->is_closed(); } -bool SocketConnection::is_closed_clean() const +bool SocketConnection::is_protocol_closed_clean() const { - assert(seastar::this_shard_id() == shard_id()); + assert(seastar::this_shard_id() == msgr_sid); return protocol->is_closed_clean(); } #endif bool SocketConnection::peer_wins() const { + assert(seastar::this_shard_id() == msgr_sid); return (messenger.get_myaddr() > peer_addr || policy.server); } -seastar::future<> SocketConnection::send(MessageURef msg) +seastar::future<> SocketConnection::send(MessageURef _msg) { - return seastar::smp::submit_to( - shard_id(), - [this, msg=std::move(msg)]() mutable { - return io_handler->send(std::move(msg)); - }); + // may be invoked from any core + MessageFRef msg = seastar::make_foreign(std::move(_msg)); + return io_handler->send(std::move(msg)); } seastar::future<> SocketConnection::send_keepalive() { - return seastar::smp::submit_to( - shard_id(), - [this] { - return io_handler->send_keepalive(); - }); + // may be invoked from any core + return io_handler->send_keepalive(); } SocketConnection::clock_t::time_point @@ -106,7 +111,6 @@ void SocketConnection::set_last_keepalive_ack(clock_t::time_point when) void SocketConnection::mark_down() { - assert(seastar::this_shard_id() == shard_id()); io_handler->mark_down(); } @@ -114,50 +118,103 @@ void SocketConnection::start_connect(const entity_addr_t& _peer_addr, const entity_name_t& _peer_name) { + assert(seastar::this_shard_id() == msgr_sid); protocol->start_connect(_peer_addr, _peer_name); } void -SocketConnection::start_accept(SocketRef&& sock, +SocketConnection::start_accept(SocketFRef&& sock, const entity_addr_t& _peer_addr) { + assert(seastar::this_shard_id() == msgr_sid); protocol->start_accept(std::move(sock), _peer_addr); } seastar::future<> SocketConnection::close_clean_yielded() { + assert(seastar::this_shard_id() == msgr_sid); return protocol->close_clean_yielded(); } -seastar::shard_id SocketConnection::shard_id() const { - return core; -} - seastar::socket_address SocketConnection::get_local_address() const { + assert(seastar::this_shard_id() == msgr_sid); return socket->get_local_address(); } ConnectionRef SocketConnection::get_local_shared_foreign_from_this() { - assert(seastar::this_shard_id() == shard_id()); + assert(seastar::this_shard_id() == msgr_sid); return make_local_shared_foreign( seastar::make_foreign(shared_from_this())); } +SocketMessenger & +SocketConnection::get_messenger() const +{ + assert(seastar::this_shard_id() == msgr_sid); + return messenger; +} + +seastar::shard_id +SocketConnection::get_messenger_shard_id() const +{ + return msgr_sid; +} + +void SocketConnection::set_peer_type(entity_type_t peer_type) { + assert(seastar::this_shard_id() == msgr_sid); + // it is not allowed to assign an unknown value when the current + // value is known + assert(!(peer_type == 0 && + peer_name.type() != 0)); + // it is not allowed to assign a different known value when the + // current value is also known. 
+ assert(!(peer_type != 0 && + peer_name.type() != 0 && + peer_type != peer_name.type())); + peer_name._type = peer_type; +} + +void SocketConnection::set_peer_id(int64_t peer_id) { + assert(seastar::this_shard_id() == msgr_sid); + // it is not allowed to assign an unknown value when the current + // value is known + assert(!(peer_id == entity_name_t::NEW && + peer_name.num() != entity_name_t::NEW)); + // it is not allowed to assign a different known value when the + // current value is also known. + assert(!(peer_id != entity_name_t::NEW && + peer_name.num() != entity_name_t::NEW && + peer_id != peer_name.num())); + peer_name._num = peer_id; +} + +void SocketConnection::set_features(uint64_t f) { + assert(seastar::this_shard_id() == msgr_sid); + features = f; +} + +void SocketConnection::set_socket(Socket *s) { + assert(seastar::this_shard_id() == msgr_sid); + socket = s; +} + void SocketConnection::print(ostream& out) const { - out << (void*)this << " "; - messenger.print(out); - if (!socket) { - out << " >> " << get_peer_name() << " " << peer_addr; - } else if (socket->get_side() == Socket::side_t::acceptor) { - out << " >> " << get_peer_name() << " " << peer_addr - << "@" << socket->get_ephemeral_port(); - } else { // socket->get_side() == Socket::side_t::connector - out << "@" << socket->get_ephemeral_port() - << " >> " << get_peer_name() << " " << peer_addr; - } + out << (void*)this << " "; + messenger.print(out); + if (seastar::this_shard_id() != msgr_sid) { + out << " >> " << get_peer_name() << " " << peer_addr; + } else if (!socket) { + out << " >> " << get_peer_name() << " " << peer_addr; + } else if (socket->get_side() == Socket::side_t::acceptor) { + out << " >> " << get_peer_name() << " " << peer_addr + << "@" << socket->get_ephemeral_port(); + } else { // socket->get_side() == Socket::side_t::connector + out << "@" << socket->get_ephemeral_port() + << " >> " << get_peer_name() << " " << peer_addr; + } } } // namespace crimson::net diff --git a/ceph/src/crimson/net/SocketConnection.h b/ceph/src/crimson/net/SocketConnection.h index aa791b6e1..823d6c574 100644 --- a/ceph/src/crimson/net/SocketConnection.h +++ b/ceph/src/crimson/net/SocketConnection.h @@ -25,6 +25,7 @@ namespace crimson::net { class ProtocolV2; class SocketMessenger; +class SocketConnection; using SocketConnectionRef = seastar::shared_ptr; #ifdef UNIT_TESTS_BUILT @@ -35,6 +36,8 @@ class Interceptor; * ConnectionHandler * * The interface class to implement Connection, called by SocketConnection. + * + * The operations must be done in get_shard_id(). 
*/ class ConnectionHandler { public: @@ -47,9 +50,11 @@ public: ConnectionHandler &operator=(const ConnectionHandler &) = delete; ConnectionHandler &operator=(ConnectionHandler &&) = delete; + virtual seastar::shard_id get_shard_id() const = 0; + virtual bool is_connected() const = 0; - virtual seastar::future<> send(MessageURef) = 0; + virtual seastar::future<> send(MessageFRef) = 0; virtual seastar::future<> send_keepalive() = 0; @@ -66,39 +71,20 @@ protected: }; class SocketConnection : public Connection { - const seastar::shard_id core; - - SocketMessenger& messenger; - - std::unique_ptr io_handler; - - std::unique_ptr protocol; - - SocketRef socket; - - entity_name_t peer_name = {0, entity_name_t::NEW}; - - entity_addr_t peer_addr; - - // which of the peer_addrs we're connecting to (as client) - // or should reconnect to (as peer) - entity_addr_t target_addr; - - uint64_t features = 0; - - ceph::net::Policy policy; - - uint64_t peer_global_id = 0; - - std::unique_ptr user_private; - - // Connection interfaces, public to users + /* + * Connection interfaces, public to users + * Working in ConnectionHandler::get_shard_id() + */ public: SocketConnection(SocketMessenger& messenger, ChainedDispatchers& dispatchers); ~SocketConnection() override; + const seastar::shard_id get_shard_id() const override { + return io_handler->get_shard_id(); + } + const entity_name_t &get_peer_name() const override { return peer_name; } @@ -145,7 +131,10 @@ class SocketConnection : public Connection { void print(std::ostream& out) const override; - // public to SocketMessenger + /* + * Public to SocketMessenger + * Working in SocketMessenger::get_shard_id(); + */ public: /// start a handshake from the client's perspective, /// only call when SocketConnection first construct @@ -154,61 +143,41 @@ class SocketConnection : public Connection { /// start a handshake from the server's perspective, /// only call when SocketConnection first construct - void start_accept(SocketRef&& socket, + void start_accept(SocketFRef&& socket, const entity_addr_t& peer_addr); seastar::future<> close_clean_yielded(); seastar::socket_address get_local_address() const; - SocketMessenger &get_messenger() const { - return messenger; - } + seastar::shard_id get_messenger_shard_id() const; + + SocketMessenger &get_messenger() const; ConnectionRef get_local_shared_foreign_from_this(); private: - seastar::shard_id shard_id() const; - - void set_peer_type(entity_type_t peer_type) { - // it is not allowed to assign an unknown value when the current - // value is known - assert(!(peer_type == 0 && - peer_name.type() != 0)); - // it is not allowed to assign a different known value when the - // current value is also known. - assert(!(peer_type != 0 && - peer_name.type() != 0 && - peer_type != peer_name.type())); - peer_name._type = peer_type; - } + void set_peer_type(entity_type_t peer_type); - void set_peer_id(int64_t peer_id) { - // it is not allowed to assign an unknown value when the current - // value is known - assert(!(peer_id == entity_name_t::NEW && - peer_name.num() != entity_name_t::NEW)); - // it is not allowed to assign a different known value when the - // current value is also known. 
- assert(!(peer_id != entity_name_t::NEW && - peer_name.num() != entity_name_t::NEW && - peer_id != peer_name.num())); - peer_name._num = peer_id; - } + void set_peer_id(int64_t peer_id); void set_peer_name(entity_name_t name) { set_peer_type(name.type()); set_peer_id(name.num()); } - void set_features(uint64_t f) { - features = f; - } + void set_features(uint64_t f); + + void set_socket(Socket *s); #ifdef UNIT_TESTS_BUILT - bool is_closed_clean() const override; + bool is_protocol_ready() const override; + + bool is_protocol_standby() const override; - bool is_closed() const override; + bool is_protocol_closed_clean() const override; + + bool is_protocol_closed() const override; // peer wins if myaddr > peeraddr bool peer_wins() const override; @@ -219,6 +188,42 @@ private: bool peer_wins() const; #endif +private: + const seastar::shard_id msgr_sid; + + /* + * Core owner is messenger core, may allow to access from the I/O core. + */ + SocketMessenger& messenger; + + std::unique_ptr protocol; + + Socket *socket = nullptr; + + entity_name_t peer_name = {0, entity_name_t::NEW}; + + entity_addr_t peer_addr; + + // which of the peer_addrs we're connecting to (as client) + // or should reconnect to (as peer) + entity_addr_t target_addr; + + uint64_t features = 0; + + ceph::net::Policy policy; + + uint64_t peer_global_id = 0; + + /* + * Core owner is I/O core (mutable). + */ + std::unique_ptr io_handler; + + /* + * Core owner is up to the connection user. + */ + std::unique_ptr user_private; + friend class IOHandler; friend class ProtocolV2; friend class FrameAssemblerV2; diff --git a/ceph/src/crimson/net/SocketMessenger.cc b/ceph/src/crimson/net/SocketMessenger.cc index a112b5080..382d08f98 100644 --- a/ceph/src/crimson/net/SocketMessenger.cc +++ b/ceph/src/crimson/net/SocketMessenger.cc @@ -34,21 +34,25 @@ namespace crimson::net { SocketMessenger::SocketMessenger(const entity_name_t& myname, const std::string& logic_name, - uint32_t nonce) - : master_sid{seastar::this_shard_id()}, + uint32_t nonce, + bool dispatch_only_on_this_shard) + : sid{seastar::this_shard_id()}, logic_name{logic_name}, nonce{nonce}, + dispatch_only_on_sid{dispatch_only_on_this_shard}, my_name{myname} {} SocketMessenger::~SocketMessenger() { logger().debug("~SocketMessenger: {}", logic_name); + ceph_assert_always(seastar::this_shard_id() == sid); ceph_assert(!listener); } bool SocketMessenger::set_addr_unknowns(const entity_addrvec_t &addrs) { + assert(seastar::this_shard_id() == sid); bool ret = false; entity_addrvec_t newaddrs = my_addrs; @@ -76,7 +80,7 @@ bool SocketMessenger::set_addr_unknowns(const entity_addrvec_t &addrs) void SocketMessenger::set_myaddrs(const entity_addrvec_t& addrs) { - assert(seastar::this_shard_id() == master_sid); + assert(seastar::this_shard_id() == sid); my_addrs = addrs; for (auto& addr : my_addrs.v) { addr.nonce = nonce; @@ -86,12 +90,12 @@ void SocketMessenger::set_myaddrs(const entity_addrvec_t& addrs) crimson::net::listen_ertr::future<> SocketMessenger::do_listen(const entity_addrvec_t& addrs) { - assert(seastar::this_shard_id() == master_sid); ceph_assert(addrs.front().get_family() == AF_INET); set_myaddrs(addrs); return seastar::futurize_invoke([this] { if (!listener) { - return FixedCPUServerSocket::create().then([this] (auto _listener) { + return ShardedServerSocket::create(dispatch_only_on_sid + ).then([this] (auto _listener) { listener = _listener; }); } else { @@ -161,6 +165,7 @@ SocketMessenger::try_bind(const entity_addrvec_t& addrs, SocketMessenger::bind_ertr::future<> 
SocketMessenger::bind(const entity_addrvec_t& addrs) { + assert(seastar::this_shard_id() == sid); using crimson::common::local_conf; return seastar::do_with(int64_t{local_conf()->ms_bind_retry_count}, [this, addrs] (auto& count) { @@ -204,9 +209,19 @@ SocketMessenger::bind(const entity_addrvec_t& addrs) }); } +seastar::future<> SocketMessenger::accept( + SocketFRef &&socket, const entity_addr_t &peer_addr) +{ + assert(seastar::this_shard_id() == sid); + SocketConnectionRef conn = + seastar::make_shared(*this, dispatchers); + conn->start_accept(std::move(socket), peer_addr); + return seastar::now(); +} + seastar::future<> SocketMessenger::start( const dispatchers_t& _dispatchers) { - assert(seastar::this_shard_id() == master_sid); + assert(seastar::this_shard_id() == sid); dispatchers.assign(_dispatchers); if (listener) { @@ -214,13 +229,17 @@ seastar::future<> SocketMessenger::start( ceph_assert(get_myaddr().is_msgr2()); ceph_assert(get_myaddr().get_port() > 0); - return listener->accept([this] (SocketRef socket, entity_addr_t peer_addr) { - assert(seastar::this_shard_id() == master_sid); + return listener->accept([this](SocketRef _socket, entity_addr_t peer_addr) { assert(get_myaddr().is_msgr2()); - SocketConnectionRef conn = - seastar::make_shared(*this, dispatchers); - conn->start_accept(std::move(socket), peer_addr); - return seastar::now(); + SocketFRef socket = seastar::make_foreign(std::move(_socket)); + if (listener->is_fixed_shard_dispatching()) { + return accept(std::move(socket), peer_addr); + } else { + return seastar::smp::submit_to(sid, + [this, peer_addr, socket = std::move(socket)]() mutable { + return accept(std::move(socket), peer_addr); + }); + } }); } return seastar::now(); @@ -229,7 +248,7 @@ seastar::future<> SocketMessenger::start( crimson::net::ConnectionRef SocketMessenger::connect(const entity_addr_t& peer_addr, const entity_name_t& peer_name) { - assert(seastar::this_shard_id() == master_sid); + assert(seastar::this_shard_id() == sid); // make sure we connect to a valid peer_addr if (!peer_addr.is_msgr2()) { @@ -249,13 +268,13 @@ SocketMessenger::connect(const entity_addr_t& peer_addr, const entity_name_t& pe seastar::future<> SocketMessenger::shutdown() { - assert(seastar::this_shard_id() == master_sid); + assert(seastar::this_shard_id() == sid); return seastar::futurize_invoke([this] { assert(dispatchers.empty()); if (listener) { auto d_listener = listener; listener = nullptr; - return d_listener->destroy(); + return d_listener->shutdown_destroy(); } else { return seastar::now(); } @@ -306,7 +325,7 @@ void SocketMessenger::learned_addr( const entity_addr_t &peer_addr_for_me, const SocketConnection& conn) { - assert(seastar::this_shard_id() == master_sid); + assert(seastar::this_shard_id() == sid); if (!need_addr) { if ((!get_myaddr().is_any() && get_myaddr().get_type() != peer_addr_for_me.get_type()) || @@ -363,34 +382,40 @@ void SocketMessenger::learned_addr( SocketPolicy SocketMessenger::get_policy(entity_type_t peer_type) const { + assert(seastar::this_shard_id() == sid); return policy_set.get(peer_type); } SocketPolicy SocketMessenger::get_default_policy() const { + assert(seastar::this_shard_id() == sid); return policy_set.get_default(); } void SocketMessenger::set_default_policy(const SocketPolicy& p) { + assert(seastar::this_shard_id() == sid); policy_set.set_default(p); } void SocketMessenger::set_policy(entity_type_t peer_type, const SocketPolicy& p) { + assert(seastar::this_shard_id() == sid); policy_set.set(peer_type, p); } void 
SocketMessenger::set_policy_throttler(entity_type_t peer_type, Throttle* throttle) { + assert(seastar::this_shard_id() == sid); // only byte throttler is used in OSD policy_set.set_throttlers(peer_type, throttle, nullptr); } crimson::net::SocketConnectionRef SocketMessenger::lookup_conn(const entity_addr_t& addr) { + assert(seastar::this_shard_id() == sid); if (auto found = connections.find(addr); found != connections.end()) { return found->second; @@ -401,16 +426,19 @@ crimson::net::SocketConnectionRef SocketMessenger::lookup_conn(const entity_addr void SocketMessenger::accept_conn(SocketConnectionRef conn) { + assert(seastar::this_shard_id() == sid); accepting_conns.insert(conn); } void SocketMessenger::unaccept_conn(SocketConnectionRef conn) { + assert(seastar::this_shard_id() == sid); accepting_conns.erase(conn); } void SocketMessenger::register_conn(SocketConnectionRef conn) { + assert(seastar::this_shard_id() == sid); auto [i, added] = connections.emplace(conn->get_peer_addr(), conn); std::ignore = i; ceph_assert(added); @@ -418,6 +446,7 @@ void SocketMessenger::register_conn(SocketConnectionRef conn) void SocketMessenger::unregister_conn(SocketConnectionRef conn) { + assert(seastar::this_shard_id() == sid); ceph_assert(conn); auto found = connections.find(conn->get_peer_addr()); ceph_assert(found != connections.end()); @@ -427,11 +456,13 @@ void SocketMessenger::unregister_conn(SocketConnectionRef conn) void SocketMessenger::closing_conn(SocketConnectionRef conn) { + assert(seastar::this_shard_id() == sid); closing_conns.push_back(conn); } void SocketMessenger::closed_conn(SocketConnectionRef conn) { + assert(seastar::this_shard_id() == sid); for (auto it = closing_conns.begin(); it != closing_conns.end();) { if (*it == conn) { @@ -444,6 +475,7 @@ void SocketMessenger::closed_conn(SocketConnectionRef conn) uint32_t SocketMessenger::get_global_seq(uint32_t old) { + assert(seastar::this_shard_id() == sid); if (old > global_seq) { global_seq = old; } diff --git a/ceph/src/crimson/net/SocketMessenger.h b/ceph/src/crimson/net/SocketMessenger.h index 4eebaab30..e4ac63184 100644 --- a/ceph/src/crimson/net/SocketMessenger.h +++ b/ceph/src/crimson/net/SocketMessenger.h @@ -29,41 +29,16 @@ namespace crimson::net { -class FixedCPUServerSocket; +class ShardedServerSocket; class SocketMessenger final : public Messenger { - const seastar::shard_id master_sid; - // Distinguish messengers with meaningful names for debugging - const std::string logic_name; - const uint32_t nonce; - - entity_name_t my_name; - entity_addrvec_t my_addrs; - crimson::auth::AuthClient* auth_client = nullptr; - crimson::auth::AuthServer* auth_server = nullptr; - - FixedCPUServerSocket* listener = nullptr; - ChainedDispatchers dispatchers; - std::map connections; - std::set accepting_conns; - std::vector closing_conns; - ceph::net::PolicySet policy_set; - // specifying we haven't learned our addr; set false when we find it. 
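
// SocketMessenger above pins all of its bookkeeping to its own shard: every
// accessor asserts seastar::this_shard_id() == sid, and the connection table
// is a plain map keyed by peer address (register asserts the address was not
// already present, unregister asserts it was).  A standalone sketch of that
// registry logic follows; the string keys, Conn and Registry types are
// simplifications for illustration, not the crimson types.

#include <cassert>
#include <map>
#include <memory>
#include <string>

struct Conn {
  std::string peer_addr;
};
using ConnRef = std::shared_ptr<Conn>;

class Registry {
public:
  // lookup_conn(): nullptr when the peer is unknown.
  ConnRef lookup(const std::string &addr) const {
    auto found = conns_.find(addr);
    return found == conns_.end() ? nullptr : found->second;
  }

  // register_conn(): a peer address may be registered at most once.
  void register_conn(ConnRef conn) {
    auto [it, added] = conns_.emplace(conn->peer_addr, conn);
    (void)it;
    assert(added);
  }

  // unregister_conn(): the exact connection must currently be registered.
  void unregister_conn(ConnRef conn) {
    auto found = conns_.find(conn->peer_addr);
    assert(found != conns_.end() && found->second == conn);
    conns_.erase(found);
  }

private:
  std::map<std::string, ConnRef> conns_;
};

int main() {
  Registry r;
  auto c = std::make_shared<Conn>(Conn{"v2:10.0.0.1:6800"});
  r.register_conn(c);
  assert(r.lookup("v2:10.0.0.1:6800") == c);
  r.unregister_conn(c);
  assert(r.lookup("v2:10.0.0.1:6800") == nullptr);
  return 0;
}
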
- bool need_addr = true; - uint32_t global_seq = 0; - bool started = false; - seastar::promise<> shutdown_promise; - - listen_ertr::future<> do_listen(const entity_addrvec_t& addr); - /// try to bind to the first unused port of given address - bind_ertr::future<> try_bind(const entity_addrvec_t& addr, - uint32_t min_port, uint32_t max_port); - - - public: +// Messenger public interfaces +public: SocketMessenger(const entity_name_t& myname, const std::string& logic_name, - uint32_t nonce); + uint32_t nonce, + bool dispatch_only_on_this_shard); + ~SocketMessenger() override; const entity_name_t &get_myname() const override { @@ -76,18 +51,18 @@ class SocketMessenger final : public Messenger { void set_myaddrs(const entity_addrvec_t& addr) override; + bool set_addr_unknowns(const entity_addrvec_t &addr) override; + void set_auth_client(crimson::auth::AuthClient *ac) override { + assert(seastar::this_shard_id() == sid); auth_client = ac; } void set_auth_server(crimson::auth::AuthServer *as) override { + assert(seastar::this_shard_id() == sid); auth_server = as; } - - bool set_addr_unknowns(const entity_addrvec_t &addr) override; - // Messenger interfaces are assumed to be called from its own shard, but its - // behavior should be symmetric when called from any shard. bind_ertr::future<> bind(const entity_addrvec_t& addr) override; seastar::future<> start(const dispatchers_t& dispatchers) override; @@ -96,20 +71,23 @@ class SocketMessenger final : public Messenger { const entity_name_t& peer_name) override; bool owns_connection(Connection &conn) const override { + assert(seastar::this_shard_id() == sid); return this == &static_cast(conn).get_messenger(); } // can only wait once seastar::future<> wait() override { - assert(seastar::this_shard_id() == master_sid); + assert(seastar::this_shard_id() == sid); return shutdown_promise.get_future(); } void stop() override { + assert(seastar::this_shard_id() == sid); dispatchers.clear(); } bool is_started() const override { + assert(seastar::this_shard_id() == sid); return !dispatchers.empty(); } @@ -131,10 +109,17 @@ class SocketMessenger final : public Messenger { void set_policy_throttler(entity_type_t peer_type, Throttle* throttle) override; - public: - crimson::auth::AuthClient* get_auth_client() const { return auth_client; } +// SocketMessenger public interfaces +public: + crimson::auth::AuthClient* get_auth_client() const { + assert(seastar::this_shard_id() == sid); + return auth_client; + } - crimson::auth::AuthServer* get_auth_server() const { return auth_server; } + crimson::auth::AuthServer* get_auth_server() const { + assert(seastar::this_shard_id() == sid); + return auth_server; + } uint32_t get_global_seq(uint32_t old=0); @@ -142,16 +127,21 @@ class SocketMessenger final : public Messenger { const SocketConnection& conn); SocketConnectionRef lookup_conn(const entity_addr_t& addr); + void accept_conn(SocketConnectionRef); + void unaccept_conn(SocketConnectionRef); + void register_conn(SocketConnectionRef); + void unregister_conn(SocketConnectionRef); + void closing_conn(SocketConnectionRef); + void closed_conn(SocketConnectionRef); - seastar::shard_id shard_id() const { - assert(seastar::this_shard_id() == master_sid); - return master_sid; + seastar::shard_id get_shard_id() const { + return sid; } #ifdef UNIT_TESTS_BUILT @@ -161,6 +151,38 @@ class SocketMessenger final : public Messenger { Interceptor *interceptor = nullptr; #endif + +private: + seastar::future<> accept(SocketFRef &&, const entity_addr_t &); + + listen_ertr::future<> 
do_listen(const entity_addrvec_t& addr); + + /// try to bind to the first unused port of given address + bind_ertr::future<> try_bind(const entity_addrvec_t& addr, + uint32_t min_port, uint32_t max_port); + + const seastar::shard_id sid; + // Distinguish messengers with meaningful names for debugging + const std::string logic_name; + const uint32_t nonce; + const bool dispatch_only_on_sid; + + entity_name_t my_name; + entity_addrvec_t my_addrs; + crimson::auth::AuthClient* auth_client = nullptr; + crimson::auth::AuthServer* auth_server = nullptr; + + ShardedServerSocket *listener = nullptr; + ChainedDispatchers dispatchers; + std::map connections; + std::set accepting_conns; + std::vector closing_conns; + ceph::net::PolicySet policy_set; + // specifying we haven't learned our addr; set false when we find it. + bool need_addr = true; + uint32_t global_seq = 0; + bool started = false; + seastar::promise<> shutdown_promise; }; } // namespace crimson::net diff --git a/ceph/src/crimson/net/chained_dispatchers.cc b/ceph/src/crimson/net/chained_dispatchers.cc index b13d40c8f..1e4af3baa 100644 --- a/ceph/src/crimson/net/chained_dispatchers.cc +++ b/ceph/src/crimson/net/chained_dispatchers.cc @@ -13,7 +13,7 @@ namespace { namespace crimson::net { seastar::future<> -ChainedDispatchers::ms_dispatch(crimson::net::ConnectionRef conn, +ChainedDispatchers::ms_dispatch(ConnectionRef conn, MessageRef m) { try { for (auto& dispatcher : dispatchers) { @@ -39,10 +39,29 @@ ChainedDispatchers::ms_dispatch(crimson::net::ConnectionRef conn, } void -ChainedDispatchers::ms_handle_accept(crimson::net::ConnectionRef conn) { +ChainedDispatchers::ms_handle_shard_change( + ConnectionRef conn, + seastar::shard_id new_shard, + bool ac) { try { for (auto& dispatcher : dispatchers) { - dispatcher->ms_handle_accept(conn); + dispatcher->ms_handle_shard_change(conn, new_shard, ac); + } + } catch (...) { + logger().error("{} got unexpected exception in ms_handle_shard_change() {}", + *conn, std::current_exception()); + ceph_abort(); + } +} + +void +ChainedDispatchers::ms_handle_accept( + ConnectionRef conn, + seastar::shard_id prv_shard, + bool is_replace) { + try { + for (auto& dispatcher : dispatchers) { + dispatcher->ms_handle_accept(conn, prv_shard, is_replace); } } catch (...) { logger().error("{} got unexpected exception in ms_handle_accept() {}", @@ -52,10 +71,12 @@ ChainedDispatchers::ms_handle_accept(crimson::net::ConnectionRef conn) { } void -ChainedDispatchers::ms_handle_connect(crimson::net::ConnectionRef conn) { +ChainedDispatchers::ms_handle_connect( + ConnectionRef conn, + seastar::shard_id prv_shard) { try { for(auto& dispatcher : dispatchers) { - dispatcher->ms_handle_connect(conn); + dispatcher->ms_handle_connect(conn, prv_shard); } } catch (...) 
{ logger().error("{} got unexpected exception in ms_handle_connect() {}", @@ -65,7 +86,7 @@ ChainedDispatchers::ms_handle_connect(crimson::net::ConnectionRef conn) { } void -ChainedDispatchers::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) { +ChainedDispatchers::ms_handle_reset(ConnectionRef conn, bool is_replace) { try { for (auto& dispatcher : dispatchers) { dispatcher->ms_handle_reset(conn, is_replace); @@ -78,7 +99,7 @@ ChainedDispatchers::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_re } void -ChainedDispatchers::ms_handle_remote_reset(crimson::net::ConnectionRef conn) { +ChainedDispatchers::ms_handle_remote_reset(ConnectionRef conn) { try { for (auto& dispatcher : dispatchers) { dispatcher->ms_handle_remote_reset(conn); diff --git a/ceph/src/crimson/net/chained_dispatchers.h b/ceph/src/crimson/net/chained_dispatchers.h index 712b0894b..ec085864f 100644 --- a/ceph/src/crimson/net/chained_dispatchers.h +++ b/ceph/src/crimson/net/chained_dispatchers.h @@ -3,6 +3,8 @@ #pragma once +#include + #include "Fwd.h" #include "crimson/common/log.h" @@ -23,11 +25,12 @@ public: bool empty() const { return dispatchers.empty(); } - seastar::future<> ms_dispatch(crimson::net::ConnectionRef, MessageRef); - void ms_handle_accept(crimson::net::ConnectionRef conn); - void ms_handle_connect(crimson::net::ConnectionRef conn); - void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace); - void ms_handle_remote_reset(crimson::net::ConnectionRef conn); + seastar::future<> ms_dispatch(ConnectionRef, MessageRef); + void ms_handle_shard_change(ConnectionRef, seastar::shard_id, bool); + void ms_handle_accept(ConnectionRef conn, seastar::shard_id, bool is_replace); + void ms_handle_connect(ConnectionRef conn, seastar::shard_id); + void ms_handle_reset(ConnectionRef conn, bool is_replace); + void ms_handle_remote_reset(ConnectionRef conn); private: dispatchers_t dispatchers; diff --git a/ceph/src/crimson/net/io_handler.cc b/ceph/src/crimson/net/io_handler.cc index 80d578363..c414c48e1 100644 --- a/ceph/src/crimson/net/io_handler.cc +++ b/ceph/src/crimson/net/io_handler.cc @@ -47,18 +47,28 @@ namespace crimson::net { IOHandler::IOHandler(ChainedDispatchers &dispatchers, SocketConnection &conn) - : dispatchers(dispatchers), + : shard_states(shard_states_t::create( + seastar::this_shard_id(), io_state_t::none)), + dispatchers(dispatchers), conn(conn), conn_ref(conn.get_local_shared_foreign_from_this()) {} IOHandler::~IOHandler() { - ceph_assert(gate.is_closed()); - assert(!out_exit_dispatching); + // close_io() must be finished + ceph_assert_always(maybe_prv_shard_states == nullptr); + // should be true in the according shard + // ceph_assert_always(shard_states->assert_closed_and_exit()); + assert(!conn_ref); } -ceph::bufferlist IOHandler::sweep_out_pending_msgs_to_sent( +#ifdef UNIT_TESTS_BUILT +IOHandler::sweep_ret +#else +ceph::bufferlist +#endif +IOHandler::sweep_out_pending_msgs_to_sent( bool require_keepalive, std::optional maybe_keepalive_ack, bool require_ack) @@ -66,25 +76,45 @@ ceph::bufferlist IOHandler::sweep_out_pending_msgs_to_sent( std::size_t num_msgs = out_pending_msgs.size(); ceph::bufferlist bl; +#ifdef UNIT_TESTS_BUILT + std::vector tags; +#endif + if (unlikely(require_keepalive)) { auto keepalive_frame = KeepAliveFrame::Encode(); bl.append(frame_assembler->get_buffer(keepalive_frame)); +#ifdef UNIT_TESTS_BUILT + auto tag = KeepAliveFrame::tag; + tags.push_back(tag); +#endif } if (unlikely(maybe_keepalive_ack.has_value())) { auto keepalive_ack_frame 
= KeepAliveFrameAck::Encode(*maybe_keepalive_ack); bl.append(frame_assembler->get_buffer(keepalive_ack_frame)); +#ifdef UNIT_TESTS_BUILT + auto tag = KeepAliveFrameAck::tag; + tags.push_back(tag); +#endif } if (require_ack && num_msgs == 0u) { - auto ack_frame = AckFrame::Encode(get_in_seq()); + auto ack_frame = AckFrame::Encode(in_seq); bl.append(frame_assembler->get_buffer(ack_frame)); +#ifdef UNIT_TESTS_BUILT + auto tag = AckFrame::tag; + tags.push_back(tag); +#endif } std::for_each( out_pending_msgs.begin(), out_pending_msgs.begin()+num_msgs, - [this, &bl](const MessageURef& msg) { + [this, &bl +#ifdef UNIT_TESTS_BUILT + , &tags +#endif + ](const MessageFRef& msg) { // set priority msg->get_header().src = conn.messenger.get_myname(); @@ -100,7 +130,7 @@ ceph::bufferlist IOHandler::sweep_out_pending_msgs_to_sent( header.type, header.priority, header.version, ceph_le32(0), header.data_off, - ceph_le64(get_in_seq()), + ceph_le64(in_seq), footer.flags, header.compat_version, header.reserved}; @@ -109,6 +139,10 @@ ceph::bufferlist IOHandler::sweep_out_pending_msgs_to_sent( logger().debug("{} --> #{} === {} ({})", conn, msg->get_seq(), *msg, msg->get_type()); bl.append(frame_assembler->get_buffer(message)); +#ifdef UNIT_TESTS_BUILT + auto tag = MessageFrame::tag; + tags.push_back(tag); +#endif }); if (!conn.policy.lossy) { @@ -118,12 +152,49 @@ ceph::bufferlist IOHandler::sweep_out_pending_msgs_to_sent( std::make_move_iterator(out_pending_msgs.end())); } out_pending_msgs.clear(); + +#ifdef UNIT_TESTS_BUILT + return sweep_ret{std::move(bl), tags}; +#else return bl; +#endif } -seastar::future<> IOHandler::send(MessageURef msg) +seastar::future<> IOHandler::send(MessageFRef msg) { - if (io_state != io_state_t::drop) { + // sid may be changed on-the-fly during the submission + if (seastar::this_shard_id() == get_shard_id()) { + return do_send(std::move(msg)); + } else { + logger().trace("{} send() is directed to {} -- {}", + conn, get_shard_id(), *msg); + return seastar::smp::submit_to( + get_shard_id(), [this, msg=std::move(msg)]() mutable { + return send_redirected(std::move(msg)); + }); + } +} + +seastar::future<> IOHandler::send_redirected(MessageFRef msg) +{ + // sid may be changed on-the-fly during the submission + if (seastar::this_shard_id() == get_shard_id()) { + return do_send(std::move(msg)); + } else { + logger().debug("{} send() is redirected to {} -- {}", + conn, get_shard_id(), *msg); + return seastar::smp::submit_to( + get_shard_id(), [this, msg=std::move(msg)]() mutable { + return send_redirected(std::move(msg)); + }); + } +} + +seastar::future<> IOHandler::do_send(MessageFRef msg) +{ + assert(seastar::this_shard_id() == get_shard_id()); + logger().trace("{} do_send() got message -- {}", conn, *msg); + if (get_io_state() != io_state_t::drop) { out_pending_msgs.push_back(std::move(msg)); notify_out_dispatch(); } @@ -132,6 +203,36 @@ seastar::future<> IOHandler::send(MessageURef msg) seastar::future<> IOHandler::send_keepalive() { + // sid may be changed on-the-fly during the submission + if (seastar::this_shard_id() == get_shard_id()) { + return do_send_keepalive(); + } else { + logger().trace("{} send_keepalive() is directed to {}", conn, get_shard_id()); + return seastar::smp::submit_to( + get_shard_id(), [this] { + return send_keepalive_redirected(); + }); + } +} + +seastar::future<> IOHandler::send_keepalive_redirected() +{ + // sid may be changed on-the-fly during the submission + if (seastar::this_shard_id() == get_shard_id()) { + return do_send_keepalive(); + } else { + 
logger().debug("{} send_keepalive() is redirected to {}", conn, get_shard_id()); + return seastar::smp::submit_to( + get_shard_id(), [this] { + return send_keepalive_redirected(); + }); + } +} + +seastar::future<> IOHandler::do_send_keepalive() +{ + assert(seastar::this_shard_id() == get_shard_id()); + logger().trace("{} do_send_keeplive(): need_keepalive={}", conn, need_keepalive); if (!need_keepalive) { need_keepalive = true; notify_out_dispatch(); @@ -141,22 +242,31 @@ seastar::future<> IOHandler::send_keepalive() void IOHandler::mark_down() { - ceph_assert_always(io_state != io_state_t::none); + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + ceph_assert_always(get_io_state() != io_state_t::none); need_dispatch_reset = false; - if (io_state == io_state_t::drop) { + if (get_io_state() == io_state_t::drop) { return; } - logger().info("{} mark_down() with {}", - conn, io_stat_printer{*this}); - set_io_state(io_state_t::drop); - handshake_listener->notify_mark_down(); + auto cc_seq = crosscore.prepare_submit(); + logger().info("{} mark_down() at {}, send {} notify_mark_down()", + conn, io_stat_printer{*this}, cc_seq); + do_set_io_state(io_state_t::drop); + shard_states->dispatch_in_background( + "notify_mark_down", conn, [this, cc_seq] { + return seastar::smp::submit_to( + conn.get_messenger_shard_id(), [this, cc_seq] { + return handshake_listener->notify_mark_down(cc_seq); + }); + }); } void IOHandler::print_io_stat(std::ostream &out) const { + assert(seastar::this_shard_id() == get_shard_id()); out << "io_stat(" - << "io_state=" << fmt::format("{}", io_state) + << "io_state=" << fmt::format("{}", get_io_state()) << ", in_seq=" << in_seq << ", out_seq=" << out_seq << ", out_pending_msgs_size=" << out_pending_msgs.size() @@ -167,49 +277,80 @@ void IOHandler::print_io_stat(std::ostream &out) const << ")"; } -void IOHandler::set_io_state( - const IOHandler::io_state_t &new_state, - FrameAssemblerV2Ref fa) +void IOHandler::assign_frame_assembler(FrameAssemblerV2Ref fa) +{ + assert(fa != nullptr); + ceph_assert_always(frame_assembler == nullptr); + frame_assembler = std::move(fa); + ceph_assert_always( + frame_assembler->get_shard_id() == get_shard_id()); + // should have been set through dispatch_accept/connect() + ceph_assert_always( + frame_assembler->get_socket_shard_id() == get_shard_id()); + ceph_assert_always(frame_assembler->is_socket_valid()); +} + +void IOHandler::do_set_io_state( + io_state_t new_state, + std::optional cc_seq, + FrameAssemblerV2Ref fa, + bool set_notify_out) { + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + auto prv_state = get_io_state(); + logger().debug("{} got {}do_set_io_state(): prv_state={}, new_state={}, " + "fa={}, set_notify_out={}, at {}", + conn, + cc_seq.has_value() ? fmt::format("{} ", *cc_seq) : "", + prv_state, new_state, + fa ? 
"present" : "N/A", set_notify_out, + io_stat_printer{*this}); ceph_assert_always(!( - (new_state == io_state_t::none && io_state != io_state_t::none) || - (new_state == io_state_t::open && io_state == io_state_t::open) || - (new_state != io_state_t::drop && io_state == io_state_t::drop) + (new_state == io_state_t::none && prv_state != io_state_t::none) || + (new_state == io_state_t::open && prv_state == io_state_t::open) )); + if (prv_state == io_state_t::drop) { + // only possible due to a racing mark_down() from user + if (new_state == io_state_t::open) { + assign_frame_assembler(std::move(fa)); + frame_assembler->shutdown_socket(nullptr); + } else { + assert(fa == nullptr); + } + return; + } + bool dispatch_in = false; if (new_state == io_state_t::open) { // to open ceph_assert_always(protocol_is_connected == true); - assert(fa != nullptr); - ceph_assert_always(frame_assembler == nullptr); - frame_assembler = std::move(fa); - ceph_assert_always(frame_assembler->is_socket_valid()); + assign_frame_assembler(std::move(fa)); dispatch_in = true; -#ifdef UNIT_TESTS_BUILT - if (conn.interceptor) { - conn.interceptor->register_conn_ready(conn); - } -#endif - } else if (io_state == io_state_t::open) { + } else if (prv_state == io_state_t::open) { // from open ceph_assert_always(protocol_is_connected == true); protocol_is_connected = false; assert(fa == nullptr); ceph_assert_always(frame_assembler->is_socket_valid()); - frame_assembler->shutdown_socket(); - if (out_dispatching) { - ceph_assert_always(!out_exit_dispatching.has_value()); - out_exit_dispatching = seastar::promise<>(); - } + frame_assembler->shutdown_socket(nullptr); } else { assert(fa == nullptr); } - if (io_state != new_state) { - io_state = new_state; - io_state_changed.set_value(); - io_state_changed = seastar::promise<>(); + if (new_state == io_state_t::delay) { + need_notify_out = set_notify_out; + if (need_notify_out) { + maybe_notify_out_dispatch(); + } + } else { + assert(set_notify_out == false); + need_notify_out = false; + } + + // FIXME: simplify and drop the prv_state == new_state case + if (prv_state != new_state) { + shard_states->set_io_state(new_state); } /* @@ -221,44 +362,141 @@ void IOHandler::set_io_state( } } -seastar::future IOHandler::wait_io_exit_dispatching() +seastar::future<> IOHandler::set_io_state( + crosscore_t::seq_t cc_seq, + io_state_t new_state, + FrameAssemblerV2Ref fa, + bool set_notify_out) { - ceph_assert_always(io_state != io_state_t::open); + assert(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} set_io_state(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq, new_state, + fa=std::move(fa), set_notify_out]() mutable { + return set_io_state(cc_seq, new_state, std::move(fa), set_notify_out); + }); + } + + do_set_io_state(new_state, cc_seq, std::move(fa), set_notify_out); + return seastar::now(); +} + +seastar::future +IOHandler::wait_io_exit_dispatching( + crosscore_t::seq_t cc_seq) +{ + assert(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} wait_io_exit_dispatching(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq] { + return wait_io_exit_dispatching(cc_seq); + }); + } + + logger().debug("{} got {} wait_io_exit_dispatching()", + conn, cc_seq); + ceph_assert_always(get_io_state() != io_state_t::open); ceph_assert_always(frame_assembler != 
nullptr); ceph_assert_always(!frame_assembler->is_socket_valid()); - return seastar::when_all( - [this] { - if (out_exit_dispatching) { - return out_exit_dispatching->get_future(); - } else { - return seastar::now(); - } - }(), - [this] { - if (in_exit_dispatching) { - return in_exit_dispatching->get_future(); - } else { - return seastar::now(); - } - }() - ).discard_result().then([this] { - return std::move(frame_assembler); + return seastar::futurize_invoke([this] { + // cannot be running in parallel with to_new_sid() + if (maybe_dropped_sid.has_value()) { + ceph_assert_always(get_io_state() == io_state_t::drop); + assert(shard_states->assert_closed_and_exit()); + auto prv_sid = *maybe_dropped_sid; + return seastar::smp::submit_to(prv_sid, [this] { + logger().debug("{} got wait_io_exit_dispatching from prv_sid", conn); + assert(maybe_prv_shard_states != nullptr); + return maybe_prv_shard_states->wait_io_exit_dispatching(); + }); + } else { + return shard_states->wait_io_exit_dispatching(); + } + }).then([this] { + logger().debug("{} finish wait_io_exit_dispatching at {}", + conn, io_stat_printer{*this}); + ceph_assert_always(frame_assembler != nullptr); + ceph_assert_always(!frame_assembler->is_socket_valid()); + frame_assembler->set_shard_id(conn.get_messenger_shard_id()); + return exit_dispatching_ret{ + std::move(frame_assembler), + get_states()}; }); } -void IOHandler::reset_session(bool full) +seastar::future<> IOHandler::reset_session( + crosscore_t::seq_t cc_seq, + bool full) { - // reset in - in_seq = 0; + assert(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} reset_session(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq, full] { + return reset_session(cc_seq, full); + }); + } + + logger().debug("{} got {} reset_session({})", + conn, cc_seq, full); + assert(get_io_state() != io_state_t::open); + reset_in(); if (full) { reset_out(); dispatch_remote_reset(); } + return seastar::now(); } -void IOHandler::requeue_out_sent() +seastar::future<> IOHandler::reset_peer_state( + crosscore_t::seq_t cc_seq) { - assert(io_state != io_state_t::open); + assert(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} reset_peer_state(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq] { + return reset_peer_state(cc_seq); + }); + } + + logger().debug("{} got {} reset_peer_state()", + conn, cc_seq); + assert(get_io_state() != io_state_t::open); + reset_in(); + do_requeue_out_sent_up_to(0); + discard_out_sent(); + return seastar::now(); +} + +seastar::future<> IOHandler::requeue_out_sent( + crosscore_t::seq_t cc_seq) +{ + assert(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} requeue_out_sent(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq] { + return requeue_out_sent(cc_seq); + }); + } + + logger().debug("{} got {} requeue_out_sent()", + conn, cc_seq); + do_requeue_out_sent(); + return seastar::now(); +} + +void IOHandler::do_requeue_out_sent() +{ + assert(get_io_state() != io_state_t::open); if (out_sent_msgs.empty()) { return; } @@ -266,7 +504,7 @@ void IOHandler::requeue_out_sent() out_seq -= out_sent_msgs.size(); logger().debug("{} requeue {} items, revert out_seq to {}", conn, out_sent_msgs.size(), out_seq); - for 
(MessageURef& msg : out_sent_msgs) { + for (MessageFRef& msg : out_sent_msgs) { msg->clear_payload(); msg->set_seq(0); } @@ -275,12 +513,32 @@ void IOHandler::requeue_out_sent() std::make_move_iterator(out_sent_msgs.begin()), std::make_move_iterator(out_sent_msgs.end())); out_sent_msgs.clear(); - notify_out_dispatch(); + maybe_notify_out_dispatch(); } -void IOHandler::requeue_out_sent_up_to(seq_num_t seq) +seastar::future<> IOHandler::requeue_out_sent_up_to( + crosscore_t::seq_t cc_seq, + seq_num_t msg_seq) { - assert(io_state != io_state_t::open); + assert(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} requeue_out_sent_up_to(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq, msg_seq] { + return requeue_out_sent_up_to(cc_seq, msg_seq); + }); + } + + logger().debug("{} got {} requeue_out_sent_up_to({})", + conn, cc_seq, msg_seq); + do_requeue_out_sent_up_to(msg_seq); + return seastar::now(); +} + +void IOHandler::do_requeue_out_sent_up_to(seq_num_t seq) +{ + assert(get_io_state() != io_state_t::open); if (out_sent_msgs.empty() && out_pending_msgs.empty()) { logger().debug("{} nothing to requeue, reset out_seq from {} to seq {}", conn, out_seq, seq); @@ -297,57 +555,233 @@ void IOHandler::requeue_out_sent_up_to(seq_num_t seq) out_sent_msgs.pop_front(); } } - requeue_out_sent(); + do_requeue_out_sent(); +} + +void IOHandler::reset_in() +{ + assert(get_io_state() != io_state_t::open); + in_seq = 0; } void IOHandler::reset_out() { - assert(io_state != io_state_t::open); - out_seq = 0; + assert(get_io_state() != io_state_t::open); + discard_out_sent(); out_pending_msgs.clear(); - out_sent_msgs.clear(); need_keepalive = false; next_keepalive_ack = std::nullopt; ack_left = 0; } -void IOHandler::dispatch_accept() +void IOHandler::discard_out_sent() { - if (io_state == io_state_t::drop) { - return; - } - // protocol_is_connected can be from true to true here if the replacing is - // happening to a connected connection. 
- protocol_is_connected = true; - dispatchers.ms_handle_accept(conn_ref); + assert(get_io_state() != io_state_t::open); + out_seq = 0; + out_sent_msgs.clear(); } -void IOHandler::dispatch_connect() +seastar::future<> +IOHandler::dispatch_accept( + crosscore_t::seq_t cc_seq, + seastar::shard_id new_sid, + ConnectionFRef conn_fref, + bool is_replace) { - if (io_state == io_state_t::drop) { - return; + return to_new_sid(cc_seq, new_sid, std::move(conn_fref), is_replace); +} + +seastar::future<> +IOHandler::dispatch_connect( + crosscore_t::seq_t cc_seq, + seastar::shard_id new_sid, + ConnectionFRef conn_fref) +{ + return to_new_sid(cc_seq, new_sid, std::move(conn_fref), std::nullopt); +} + +seastar::future<> +IOHandler::cleanup_prv_shard(seastar::shard_id prv_sid) +{ + assert(seastar::this_shard_id() == get_shard_id()); + return seastar::smp::submit_to(prv_sid, [this] { + logger().debug("{} got cleanup_prv_shard()", conn); + assert(maybe_prv_shard_states != nullptr); + auto ref_prv_states = std::move(maybe_prv_shard_states); + auto &prv_states = *ref_prv_states; + return prv_states.close( + ).then([ref_prv_states=std::move(ref_prv_states)] { + ceph_assert_always(ref_prv_states->assert_closed_and_exit()); + }); + }).then([this] { + ceph_assert_always(maybe_prv_shard_states == nullptr); + }); +} + +seastar::future<> +IOHandler::to_new_sid( + crosscore_t::seq_t cc_seq, + seastar::shard_id new_sid, + ConnectionFRef conn_fref, + std::optional is_replace) +{ + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} to_new_sid(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq, new_sid, is_replace, + conn_fref=std::move(conn_fref)]() mutable { + return to_new_sid(cc_seq, new_sid, std::move(conn_fref), is_replace); + }); } - ceph_assert_always(protocol_is_connected == false); - protocol_is_connected = true; - dispatchers.ms_handle_connect(conn_ref); + + bool is_accept_or_connect = is_replace.has_value(); + logger().debug("{} got {} to_new_sid_1(new_sid={}, {}) at {}", + conn, cc_seq, new_sid, + fmt::format("{}", + is_accept_or_connect ? + (*is_replace ? "accept(replace)" : "accept(!replace)") : + "connect"), + io_stat_printer{*this}); + auto next_cc_seq = ++cc_seq; + + if (get_io_state() != io_state_t::drop) { + ceph_assert_always(conn_ref); + if (new_sid != seastar::this_shard_id()) { + dispatchers.ms_handle_shard_change(conn_ref, new_sid, is_accept_or_connect); + // user can make changes + } + } else { + // it is possible that both io_handler and protocolv2 are + // trying to close each other from different cores simultaneously. + assert(!protocol_is_connected); + } + + if (get_io_state() != io_state_t::drop) { + if (is_accept_or_connect) { + // protocol_is_connected can be from true to true here if the replacing is + // happening to a connected connection. 
+ } else { + ceph_assert_always(protocol_is_connected == false); + } + protocol_is_connected = true; + } else { + assert(!protocol_is_connected); + } + + bool is_dropped = false; + if (get_io_state() == io_state_t::drop) { + is_dropped = true; + } + ceph_assert_always(get_io_state() != io_state_t::open); + + // apply the switching atomically + ceph_assert_always(conn_ref); + conn_ref.reset(); + auto prv_sid = get_shard_id(); + ceph_assert_always(maybe_prv_shard_states == nullptr); + maybe_prv_shard_states = std::move(shard_states); + shard_states = shard_states_t::create_from_previous( + *maybe_prv_shard_states, new_sid); + assert(new_sid == get_shard_id()); + + return seastar::smp::submit_to(new_sid, + [this, next_cc_seq, is_dropped, prv_sid, is_replace, conn_fref=std::move(conn_fref)]() mutable { + logger().debug("{} got {} to_new_sid_2(prv_sid={}, is_dropped={}, {}) at {}", + conn, next_cc_seq, prv_sid, is_dropped, + fmt::format("{}", + is_replace.has_value() ? + (*is_replace ? "accept(replace)" : "accept(!replace)") : + "connect"), + io_stat_printer{*this}); + + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + ceph_assert_always(get_io_state() != io_state_t::open); + ceph_assert_always(!maybe_dropped_sid.has_value()); + ceph_assert_always(crosscore.proceed_or_wait(next_cc_seq)); + + if (is_dropped) { + ceph_assert_always(get_io_state() == io_state_t::drop); + ceph_assert_always(shard_states->assert_closed_and_exit()); + maybe_dropped_sid = prv_sid; + // cleanup_prv_shard() will be done in a follow-up close_io() + } else { + // possible at io_state_t::drop + + // previous shard is not cleaned, + // but close_io() is responsible to clean up the current shard, + // so cleanup the previous shard here. + shard_states->dispatch_in_background( + "cleanup_prv_sid", conn, [this, prv_sid] { + return cleanup_prv_shard(prv_sid); + }); + maybe_notify_out_dispatch(); + } + + ceph_assert_always(!conn_ref); + // assign even if already dropping + conn_ref = make_local_shared_foreign(std::move(conn_fref)); + + if (get_io_state() != io_state_t::drop) { + if (is_replace.has_value()) { + dispatchers.ms_handle_accept(conn_ref, prv_sid, *is_replace); + } else { + dispatchers.ms_handle_connect(conn_ref, prv_sid); + } + // user can make changes + } + }); +} + +seastar::future<> IOHandler::set_accepted_sid( + crosscore_t::seq_t cc_seq, + seastar::shard_id sid, + ConnectionFRef conn_fref) +{ + assert(seastar::this_shard_id() == get_shard_id()); + assert(get_io_state() == io_state_t::none); + ceph_assert_always(conn_ref); + conn_ref.reset(); + assert(maybe_prv_shard_states == nullptr); + shard_states.reset(); + shard_states = shard_states_t::create(sid, io_state_t::none); + return seastar::smp::submit_to(sid, + [this, cc_seq, conn_fref=std::move(conn_fref)]() mutable { + // must be the first to proceed + ceph_assert_always(crosscore.proceed_or_wait(cc_seq)); + + logger().debug("{} set accepted sid", conn); + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + ceph_assert_always(get_io_state() == io_state_t::none); + assert(maybe_prv_shard_states == nullptr); + ceph_assert_always(!conn_ref); + conn_ref = make_local_shared_foreign(std::move(conn_fref)); + }); } void IOHandler::dispatch_reset(bool is_replace) { - ceph_assert_always(io_state == io_state_t::drop); + ceph_assert_always(get_io_state() == io_state_t::drop); if (!need_dispatch_reset) { return; } need_dispatch_reset = false; + ceph_assert_always(conn_ref); + dispatchers.ms_handle_reset(conn_ref, is_replace); + // user can 
make changes } void IOHandler::dispatch_remote_reset() { - if (io_state == io_state_t::drop) { + if (get_io_state() == io_state_t::drop) { return; } + ceph_assert_always(conn_ref); + dispatchers.ms_handle_remote_reset(conn_ref); + // user can make changes } void IOHandler::ack_out_sent(seq_num_t seq) @@ -364,85 +798,74 @@ void IOHandler::ack_out_sent(seq_num_t seq) } } -seastar::future IOHandler::try_exit_out_dispatch() { - assert(!is_out_queued()); - return frame_assembler->flush( - ).then([this] { - if (!is_out_queued()) { - // still nothing pending to send after flush, - // the dispatching can ONLY stop now - ceph_assert(out_dispatching); - out_dispatching = false; - if (unlikely(out_exit_dispatching.has_value())) { - out_exit_dispatching->set_value(); - out_exit_dispatching = std::nullopt; - logger().info("{} do_out_dispatch: nothing queued at {}," - " set out_exit_dispatching", - conn, io_state); - } - return seastar::make_ready_future(stop_t::yes); - } else { - // something is pending to send during flushing - return seastar::make_ready_future(stop_t::no); - } - }); -} - -seastar::future<> IOHandler::do_out_dispatch() +seastar::future<> +IOHandler::do_out_dispatch(shard_states_t &ctx) { - return seastar::repeat([this] { - switch (io_state) { + return seastar::repeat([this, &ctx] { + switch (ctx.get_io_state()) { case io_state_t::open: { - bool still_queued = is_out_queued(); - if (unlikely(!still_queued)) { - return try_exit_out_dispatch(); + if (unlikely(!is_out_queued())) { + // try exit open dispatching + return frame_assembler->flush( + ).then([this, &ctx] { + if (ctx.get_io_state() != io_state_t::open || is_out_queued()) { + return seastar::make_ready_future(stop_t::no); + } + // still nothing pending to send after flush, + // open dispatching can ONLY stop now + ctx.exit_out_dispatching("exit-open", conn); + return seastar::make_ready_future(stop_t::yes); + }); } + + auto require_keepalive = need_keepalive; + need_keepalive = false; + auto maybe_keepalive_ack = next_keepalive_ack; + next_keepalive_ack = std::nullopt; auto to_ack = ack_left; assert(to_ack == 0 || in_seq > 0); - return frame_assembler->write( - sweep_out_pending_msgs_to_sent( - need_keepalive, next_keepalive_ack, to_ack > 0) - ).then([this, prv_keepalive_ack=next_keepalive_ack, to_ack] { - need_keepalive = false; - if (next_keepalive_ack == prv_keepalive_ack) { - next_keepalive_ack = std::nullopt; - } - assert(ack_left >= to_ack); - ack_left -= to_ack; - if (!is_out_queued()) { - return try_exit_out_dispatch(); - } else { - // messages were enqueued during socket write - return seastar::make_ready_future(stop_t::no); + ack_left = 0; +#ifdef UNIT_TESTS_BUILT + auto ret = sweep_out_pending_msgs_to_sent( + require_keepalive, maybe_keepalive_ack, to_ack > 0); + return frame_assembler->intercept_frames(ret.tags, true + ).then([this, bl=std::move(ret.bl)]() mutable { + return frame_assembler->write(std::move(bl)); + } +#else + auto bl = sweep_out_pending_msgs_to_sent( + require_keepalive, maybe_keepalive_ack, to_ack > 0); + return frame_assembler->write(std::move(bl) +#endif + ).then([this, &ctx] { + if (ctx.get_io_state() != io_state_t::open) { + return frame_assembler->flush( + ).then([] { + return seastar::make_ready_future(stop_t::no); + }); } + + // FIXME: may leak a flush if state is changed after return and before + // the next repeat body. 
+ return seastar::make_ready_future(stop_t::no); }); } case io_state_t::delay: // delay out dispatching until open - if (out_exit_dispatching) { - out_exit_dispatching->set_value(); - out_exit_dispatching = std::nullopt; - logger().info("{} do_out_dispatch: delay and set out_exit_dispatching ...", conn); - } else { - logger().info("{} do_out_dispatch: delay ...", conn); - } - return io_state_changed.get_future( + ctx.notify_out_dispatching_stopped("delay...", conn); + return ctx.wait_state_change( ).then([] { return stop_t::no; }); case io_state_t::drop: - ceph_assert(out_dispatching); - out_dispatching = false; - if (out_exit_dispatching) { - out_exit_dispatching->set_value(); - out_exit_dispatching = std::nullopt; - logger().info("{} do_out_dispatch: dropped and set out_exit_dispatching", conn); - } else { - logger().info("{} do_out_dispatch: dropped", conn); - } + ctx.exit_out_dispatching("dropped", conn); + return seastar::make_ready_future(stop_t::yes); + case io_state_t::switched: + ctx.exit_out_dispatching("switched", conn); return seastar::make_ready_future(stop_t::yes); default: - ceph_assert(false); + ceph_abort("impossible"); } - }).handle_exception_type([this] (const std::system_error& e) { + }).handle_exception_type([this, &ctx](const std::system_error& e) { + auto io_state = ctx.get_io_state(); if (e.code() != std::errc::broken_pipe && e.code() != std::errc::connection_reset && e.code() != error::negotiation_failure) { @@ -452,58 +875,83 @@ seastar::future<> IOHandler::do_out_dispatch() } if (io_state == io_state_t::open) { - logger().info("{} do_out_dispatch(): fault at {}, going to delay -- {}", - conn, io_state, e.what()); + auto cc_seq = crosscore.prepare_submit(); + logger().info("{} do_out_dispatch(): fault at {}, {}, going to delay -- {}, " + "send {} notify_out_fault()", + conn, io_state, io_stat_printer{*this}, e.what(), cc_seq); std::exception_ptr eptr; try { throw e; } catch(...) 
{ eptr = std::current_exception(); } - set_io_state(io_state_t::delay); - handshake_listener->notify_out_fault("do_out_dispatch", eptr); + do_set_io_state(io_state_t::delay); + shard_states->dispatch_in_background( + "notify_out_fault(out)", conn, [this, cc_seq, eptr] { + auto states = get_states(); + return seastar::smp::submit_to( + conn.get_messenger_shard_id(), [this, cc_seq, eptr, states] { + return handshake_listener->notify_out_fault( + cc_seq, "do_out_dispatch", eptr, states); + }); + }); } else { - logger().info("{} do_out_dispatch(): fault at {} -- {}", - conn, io_state, e.what()); + if (io_state != io_state_t::switched) { + logger().info("{} do_out_dispatch(): fault at {}, {} -- {}", + conn, io_state, io_stat_printer{*this}, e.what()); + } else { + logger().info("{} do_out_dispatch(): fault at {} -- {}", + conn, io_state, e.what()); + } } - return do_out_dispatch(); + return do_out_dispatch(ctx); }); } +void IOHandler::maybe_notify_out_dispatch() +{ + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + if (is_out_queued()) { + notify_out_dispatch(); + } +} + void IOHandler::notify_out_dispatch() { - handshake_listener->notify_out(); - if (out_dispatching) { - // already dispatching - return; + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + assert(is_out_queued()); + if (need_notify_out) { + auto cc_seq = crosscore.prepare_submit(); + logger().debug("{} send {} notify_out()", + conn, cc_seq); + shard_states->dispatch_in_background( + "notify_out", conn, [this, cc_seq] { + return seastar::smp::submit_to( + conn.get_messenger_shard_id(), [this, cc_seq] { + return handshake_listener->notify_out(cc_seq); + }); + }); } - out_dispatching = true; - switch (io_state) { - case io_state_t::open: - [[fallthrough]]; - case io_state_t::delay: - assert(!gate.is_closed()); - gate.dispatch_in_background("do_out_dispatch", conn, [this] { - return do_out_dispatch(); + if (shard_states->try_enter_out_dispatching()) { + shard_states->dispatch_in_background( + "do_out_dispatch", conn, [this] { + return do_out_dispatch(*shard_states); }); - return; - case io_state_t::drop: - out_dispatching = false; - return; - default: - ceph_assert(false); } } seastar::future<> -IOHandler::read_message(utime_t throttle_stamp, std::size_t msg_size) +IOHandler::read_message( + shard_states_t &ctx, + utime_t throttle_stamp, + std::size_t msg_size) { - return frame_assembler->read_frame_payload( - ).then([this, throttle_stamp, msg_size](auto payload) { - if (unlikely(io_state != io_state_t::open)) { + return frame_assembler->read_frame_payload( + ).then([this, throttle_stamp, msg_size, &ctx](auto payload) { + if (unlikely(ctx.get_io_state() != io_state_t::open)) { logger().debug("{} triggered {} during read_message()", - conn, io_state); + conn, ctx.get_io_state()); abort_protocol(); } @@ -561,7 +1009,7 @@ IOHandler::read_message(utime_t throttle_stamp, std::size_t msg_size) // client side queueing because messages can't be renumbered, but the (kernel) // client will occasionally pull a message out of the sent queue to send // elsewhere. in that case it doesn't matter if we "got" it or not. 
- uint64_t cur_seq = get_in_seq(); + uint64_t cur_seq = in_seq; if (message->get_seq() <= cur_seq) { logger().error("{} got old message {} <= {} {}, discarding", conn, message->get_seq(), cur_seq, *message); @@ -605,20 +1053,24 @@ IOHandler::read_message(utime_t throttle_stamp, std::size_t msg_size) // TODO: change MessageRef with seastar::shared_ptr auto msg_ref = MessageRef{message, false}; - assert(io_state == io_state_t::open); + assert(ctx.get_io_state() == io_state_t::open); + assert(get_io_state() == io_state_t::open); + ceph_assert_always(conn_ref); + // throttle the reading process by the returned future return dispatchers.ms_dispatch(conn_ref, std::move(msg_ref)); + // user can make changes }); } void IOHandler::do_in_dispatch() { - ceph_assert_always(!in_exit_dispatching.has_value()); - in_exit_dispatching = seastar::promise<>(); - gate.dispatch_in_background("do_in_dispatch", conn, [this] { - return seastar::keep_doing([this] { - return frame_assembler->read_main_preamble( - ).then([this](auto ret) { + shard_states->enter_in_dispatching(); + shard_states->dispatch_in_background( + "do_in_dispatch", conn, [this, &ctx=*shard_states] { + return seastar::keep_doing([this, &ctx] { + return frame_assembler->read_main_preamble( + ).then([this, &ctx](auto ret) { switch (ret.tag) { case Tag::MESSAGE: { size_t msg_size = get_msg_size(*ret.rx_frame_asm); @@ -628,7 +1080,7 @@ void IOHandler::do_in_dispatch() return seastar::now(); } // TODO: message throttler - ceph_assert(false); + ceph_abort("TODO"); return seastar::now(); }).then([this, msg_size] { // throttle_bytes() logic @@ -643,14 +1095,14 @@ void IOHandler::do_in_dispatch() conn.policy.throttler_bytes->get_current(), conn.policy.throttler_bytes->get_max()); return conn.policy.throttler_bytes->get(msg_size); - }).then([this, msg_size] { + }).then([this, msg_size, &ctx] { // TODO: throttle_dispatch_queue() logic utime_t throttle_stamp{seastar::lowres_system_clock::now()}; - return read_message(throttle_stamp, msg_size); + return read_message(ctx, throttle_stamp, msg_size); }); } case Tag::ACK: - return frame_assembler->read_frame_payload( + return frame_assembler->read_frame_payload( ).then([this](auto payload) { // handle_message_ack() logic auto ack = AckFrame::Decode(payload->back()); @@ -658,7 +1110,7 @@ void IOHandler::do_in_dispatch() ack_out_sent(ack.seq()); }); case Tag::KEEPALIVE2: - return frame_assembler->read_frame_payload( + return frame_assembler->read_frame_payload( ).then([this](auto payload) { // handle_keepalive2() logic auto keepalive_frame = KeepAliveFrame::Decode(payload->back()); @@ -666,12 +1118,14 @@ void IOHandler::do_in_dispatch() conn, keepalive_frame.timestamp()); // notify keepalive ack next_keepalive_ack = keepalive_frame.timestamp(); - notify_out_dispatch(); + if (seastar::this_shard_id() == get_shard_id()) { + notify_out_dispatch(); + } last_keepalive = seastar::lowres_system_clock::now(); }); case Tag::KEEPALIVE2_ACK: - return frame_assembler->read_frame_payload( + return frame_assembler->read_frame_payload( ).then([this](auto payload) { // handle_keepalive2_ack() logic auto keepalive_ack_frame = KeepAliveFrameAck::Decode(payload->back()); @@ -688,7 +1142,7 @@ void IOHandler::do_in_dispatch() } } }); - }).handle_exception([this](std::exception_ptr eptr) { + }).handle_exception([this, &ctx](std::exception_ptr eptr) { const char *e_what; try { std::rethrow_exception(eptr); @@ -696,21 +1150,138 @@ void IOHandler::do_in_dispatch() e_what = e.what(); } + auto io_state = ctx.get_io_state(); if (io_state == 
io_state_t::open) { - logger().info("{} do_in_dispatch(): fault at {}, going to delay -- {}", - conn, io_state, e_what); - set_io_state(io_state_t::delay); - handshake_listener->notify_out_fault("do_in_dispatch", eptr); + auto cc_seq = crosscore.prepare_submit(); + logger().info("{} do_in_dispatch(): fault at {}, {}, going to delay -- {}, " + "send {} notify_out_fault()", + conn, io_state, io_stat_printer{*this}, e_what, cc_seq); + do_set_io_state(io_state_t::delay); + shard_states->dispatch_in_background( + "notify_out_fault(in)", conn, [this, cc_seq, eptr] { + auto states = get_states(); + return seastar::smp::submit_to( + conn.get_messenger_shard_id(), [this, cc_seq, eptr, states] { + return handshake_listener->notify_out_fault( + cc_seq, "do_in_dispatch", eptr, states); + }); + }); } else { - logger().info("{} do_in_dispatch(): fault at {} -- {}", - conn, io_state, e_what); + if (io_state != io_state_t::switched) { + logger().info("{} do_in_dispatch(): fault at {}, {} -- {}", + conn, io_state, io_stat_printer{*this}, e_what); + } else { + logger().info("{} do_in_dispatch(): fault at {} -- {}", + conn, io_state, e_what); + } } - }).finally([this] { - ceph_assert_always(in_exit_dispatching.has_value()); - in_exit_dispatching->set_value(); - in_exit_dispatching = std::nullopt; + }).finally([&ctx] { + ctx.exit_in_dispatching(); }); }); } +seastar::future<> +IOHandler::close_io( + crosscore_t::seq_t cc_seq, + bool is_dispatch_reset, + bool is_replace) +{ + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); + if (!crosscore.proceed_or_wait(cc_seq)) { + logger().debug("{} got {} close_io(), wait at {}", + conn, cc_seq, crosscore.get_in_seq()); + return crosscore.wait(cc_seq + ).then([this, cc_seq, is_dispatch_reset, is_replace] { + return close_io(cc_seq, is_dispatch_reset, is_replace); + }); + } + + logger().debug("{} got {} close_io(reset={}, replace={})", + conn, cc_seq, is_dispatch_reset, is_replace); + ceph_assert_always(get_io_state() == io_state_t::drop); + + if (is_dispatch_reset) { + dispatch_reset(is_replace); + } + + ceph_assert_always(conn_ref); + conn_ref.reset(); + + // cannot be running in parallel with to_new_sid() + if (maybe_dropped_sid.has_value()) { + assert(shard_states->assert_closed_and_exit()); + auto prv_sid = *maybe_dropped_sid; + return cleanup_prv_shard(prv_sid); + } else { + return shard_states->close( + ).then([this] { + assert(shard_states->assert_closed_and_exit()); + }); + } +} + +/* + * IOHandler::shard_states_t + */ + +void +IOHandler::shard_states_t::notify_out_dispatching_stopped( + const char *what, SocketConnection &conn) +{ + assert(seastar::this_shard_id() == sid); + if (unlikely(out_exit_dispatching.has_value())) { + out_exit_dispatching->set_value(); + out_exit_dispatching = std::nullopt; + logger().info("{} do_out_dispatch: stop({}) at {}, set out_exit_dispatching", + conn, what, io_state); + } else { + if (unlikely(io_state != io_state_t::open)) { + logger().info("{} do_out_dispatch: stop({}) at {}, no out_exit_dispatching", + conn, what, io_state); + } + } +} + +seastar::future<> +IOHandler::shard_states_t::wait_io_exit_dispatching() +{ + assert(seastar::this_shard_id() == sid); + assert(io_state != io_state_t::open); + assert(!gate.is_closed()); + return seastar::when_all( + [this] { + if (out_exit_dispatching) { + return out_exit_dispatching->get_future(); + } else { + return seastar::now(); + } + }(), + [this] { + if (in_exit_dispatching) { + return in_exit_dispatching->get_future(); + } else { + return seastar::now(); + } + }() + 
).discard_result(); +} + +IOHandler::shard_states_ref_t +IOHandler::shard_states_t::create_from_previous( + shard_states_t &prv_states, + seastar::shard_id new_sid) +{ + auto io_state = prv_states.io_state; + assert(io_state != io_state_t::open); + auto ret = shard_states_t::create(new_sid, io_state); + if (io_state == io_state_t::drop) { + // the new gate should not never be used + auto fut = ret->gate.close(); + ceph_assert_always(fut.available()); + } + prv_states.set_io_state(io_state_t::switched); + return ret; +} + } // namespace crimson::net diff --git a/ceph/src/crimson/net/io_handler.h b/ceph/src/crimson/net/io_handler.h index e04b6356e..f53c2ba64 100644 --- a/ceph/src/crimson/net/io_handler.h +++ b/ceph/src/crimson/net/io_handler.h @@ -3,6 +3,9 @@ #pragma once +#include + +#include #include #include "crimson/common/gated.h" @@ -12,12 +15,106 @@ namespace crimson::net { +/** + * crosscore_t + * + * To preserve the event order across cores. + */ +class crosscore_t { +public: + using seq_t = uint64_t; + + crosscore_t() = default; + ~crosscore_t() = default; + + seq_t get_in_seq() const { + return in_seq; + } + + seq_t prepare_submit() { + ++out_seq; + return out_seq; + } + + bool proceed_or_wait(seq_t seq) { + if (seq == in_seq + 1) { + ++in_seq; + if (unlikely(in_pr_wait.has_value())) { + in_pr_wait->set_value(); + in_pr_wait = std::nullopt; + } + return true; + } else { + return false; + } + } + + seastar::future<> wait(seq_t seq) { + assert(seq != in_seq + 1); + if (!in_pr_wait.has_value()) { + in_pr_wait = seastar::shared_promise<>(); + } + return in_pr_wait->get_shared_future(); + } + +private: + seq_t out_seq = 0; + seq_t in_seq = 0; + std::optional> in_pr_wait; +}; + +/** + * io_handler_state + * + * It is required to populate the states from IOHandler to ProtocolV2 + * asynchronously. + */ +struct io_handler_state { + seq_num_t in_seq; + bool is_out_queued; + bool has_out_sent; + + bool is_out_queued_or_sent() const { + return is_out_queued || has_out_sent; + } + + /* + * should be consistent with the accroding interfaces in IOHandler + */ + + void reset_session(bool full) { + in_seq = 0; + if (full) { + is_out_queued = false; + has_out_sent = false; + } + } + + void reset_peer_state() { + in_seq = 0; + is_out_queued = is_out_queued_or_sent(); + has_out_sent = false; + } + + void requeue_out_sent_up_to() { + // noop since the information is insufficient + } + + void requeue_out_sent() { + if (has_out_sent) { + has_out_sent = false; + is_out_queued = true; + } + } +}; + /** * HandshakeListener * - * The interface class for IOHandler to notify the ProtocolV2 for handshake. + * The interface class for IOHandler to notify the ProtocolV2. * - * The notifications may be cross-core and asynchronous. 
+ * The notifications may be cross-core and must be sent to + * SocketConnection::get_messenger_shard_id() */ class HandshakeListener { public: @@ -28,11 +125,17 @@ public: HandshakeListener &operator=(const HandshakeListener &) = delete; HandshakeListener &operator=(HandshakeListener &&) = delete; - virtual void notify_out() = 0; + virtual seastar::future<> notify_out( + crosscore_t::seq_t cc_seq) = 0; - virtual void notify_out_fault(const char *where, std::exception_ptr) = 0; + virtual seastar::future<> notify_out_fault( + crosscore_t::seq_t cc_seq, + const char *where, + std::exception_ptr, + io_handler_state) = 0; - virtual void notify_mark_down() = 0; + virtual seastar::future<> notify_mark_down( + crosscore_t::seq_t cc_seq) = 0; protected: HandshakeListener() = default; @@ -60,24 +163,32 @@ public: /* * as ConnectionHandler */ -private: +public: + seastar::shard_id get_shard_id() const final { + return shard_states->get_shard_id(); + } + bool is_connected() const final { + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); return protocol_is_connected; } - seastar::future<> send(MessageURef msg) final; + seastar::future<> send(MessageFRef msg) final; seastar::future<> send_keepalive() final; clock_t::time_point get_last_keepalive() const final { + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); return last_keepalive; } clock_t::time_point get_last_keepalive_ack() const final { + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); return last_keepalive_ack; } void set_last_keepalive_ack(clock_t::time_point when) final { + ceph_assert_always(seastar::this_shard_id() == get_shard_id()); last_keepalive_ack = when; } @@ -89,31 +200,39 @@ private: * The calls may be cross-core and asynchronous */ public: + /* + * should not be called cross-core + */ + void set_handshake_listener(HandshakeListener &hl) { + assert(seastar::this_shard_id() == get_shard_id()); ceph_assert_always(handshake_listener == nullptr); handshake_listener = &hl; } + io_handler_state get_states() const { + // might be called from prv_sid during wait_io_exit_dispatching() + return {in_seq, is_out_queued(), has_out_sent()}; + } + struct io_stat_printer { const IOHandler &io_handler; }; void print_io_stat(std::ostream &out) const; - seastar::future<> close_io( - bool is_dispatch_reset, - bool is_replace) { - ceph_assert_always(io_state == io_state_t::drop); - - if (is_dispatch_reset) { - dispatch_reset(is_replace); - } + seastar::future<> set_accepted_sid( + crosscore_t::seq_t cc_seq, + seastar::shard_id sid, + ConnectionFRef conn_fref); - ceph_assert_always(conn_ref); - conn_ref.reset(); + /* + * may be called cross-core + */ - assert(!gate.is_closed()); - return gate.close(); - } + seastar::future<> close_io( + crosscore_t::seq_t cc_seq, + bool is_dispatch_reset, + bool is_replace); /** * io_state_t @@ -122,36 +241,219 @@ public: * io behavior accordingly. */ enum class io_state_t : uint8_t { - none, // no IO is possible as the connection is not available to the user yet. - delay, // IO is delayed until open. - open, // Dispatch In and Out concurrently. - drop // Drop IO as the connection is closed. + none, // no IO is possible as the connection is not available to the user yet. + delay, // IO is delayed until open. + open, // Dispatch In and Out concurrently. + drop, // Drop IO as the connection is closed. 
+ switched // IO is switched to a different core + // (is moved to maybe_prv_shard_states) }; friend class fmt::formatter; - void set_io_state(const io_state_t &new_state, FrameAssemblerV2Ref fa=nullptr); + seastar::future<> set_io_state( + crosscore_t::seq_t cc_seq, + io_state_t new_state, + FrameAssemblerV2Ref fa, + bool set_notify_out); - seastar::future wait_io_exit_dispatching(); + struct exit_dispatching_ret { + FrameAssemblerV2Ref frame_assembler; + io_handler_state io_states; + }; + seastar::future + wait_io_exit_dispatching( + crosscore_t::seq_t cc_seq); - void reset_session(bool full); + seastar::future<> reset_session( + crosscore_t::seq_t cc_seq, + bool full); - void requeue_out_sent_up_to(seq_num_t seq); + seastar::future<> reset_peer_state( + crosscore_t::seq_t cc_seq); - void requeue_out_sent(); + seastar::future<> requeue_out_sent_up_to( + crosscore_t::seq_t cc_seq, + seq_num_t msg_seq); - bool is_out_queued_or_sent() const { - return is_out_queued() || !out_sent_msgs.empty(); - } + seastar::future<> requeue_out_sent( + crosscore_t::seq_t cc_seq); - seq_num_t get_in_seq() const { - return in_seq; + seastar::future<> dispatch_accept( + crosscore_t::seq_t cc_seq, + seastar::shard_id new_sid, + ConnectionFRef, + bool is_replace); + + seastar::future<> dispatch_connect( + crosscore_t::seq_t cc_seq, + seastar::shard_id new_sid, + ConnectionFRef); + + private: + class shard_states_t; + using shard_states_ref_t = std::unique_ptr; + + class shard_states_t { + public: + shard_states_t(seastar::shard_id _sid, io_state_t state) + : sid{_sid}, io_state{state} {} + + seastar::shard_id get_shard_id() const { + return sid; + } + + io_state_t get_io_state() const { + assert(seastar::this_shard_id() == sid); + return io_state; + } + + void set_io_state(io_state_t new_state) { + assert(seastar::this_shard_id() == sid); + assert(io_state != new_state); + pr_io_state_changed.set_value(); + pr_io_state_changed = seastar::promise<>(); + if (io_state == io_state_t::open) { + // from open + if (out_dispatching) { + ceph_assert_always(!out_exit_dispatching.has_value()); + out_exit_dispatching = seastar::promise<>(); + } + } + io_state = new_state; + } + + seastar::future<> wait_state_change() { + assert(seastar::this_shard_id() == sid); + return pr_io_state_changed.get_future(); + } + + template + void dispatch_in_background( + const char *what, SocketConnection &who, Func &&func) { + assert(seastar::this_shard_id() == sid); + ceph_assert_always(!gate.is_closed()); + gate.dispatch_in_background(what, who, std::move(func)); + } + + void enter_in_dispatching() { + assert(seastar::this_shard_id() == sid); + assert(io_state == io_state_t::open); + ceph_assert_always(!in_exit_dispatching.has_value()); + in_exit_dispatching = seastar::promise<>(); + } + + void exit_in_dispatching() { + assert(seastar::this_shard_id() == sid); + assert(io_state != io_state_t::open); + ceph_assert_always(in_exit_dispatching.has_value()); + in_exit_dispatching->set_value(); + in_exit_dispatching = std::nullopt; + } + + bool try_enter_out_dispatching() { + assert(seastar::this_shard_id() == sid); + if (out_dispatching) { + // already dispatching out + return false; + } + switch (io_state) { + case io_state_t::open: + [[fallthrough]]; + case io_state_t::delay: + out_dispatching = true; + return true; + case io_state_t::drop: + [[fallthrough]]; + case io_state_t::switched: + // do not dispatch out + return false; + default: + ceph_abort("impossible"); + } + } + + void notify_out_dispatching_stopped( + const char *what, 
SocketConnection &conn); + + void exit_out_dispatching( + const char *what, SocketConnection &conn) { + assert(seastar::this_shard_id() == sid); + ceph_assert_always(out_dispatching); + out_dispatching = false; + notify_out_dispatching_stopped(what, conn); + } + + seastar::future<> wait_io_exit_dispatching(); + + seastar::future<> close() { + assert(seastar::this_shard_id() == sid); + assert(!gate.is_closed()); + return gate.close(); + } + + bool assert_closed_and_exit() const { + assert(seastar::this_shard_id() == sid); + if (gate.is_closed()) { + ceph_assert_always(io_state == io_state_t::drop || + io_state == io_state_t::switched); + ceph_assert_always(!out_dispatching); + ceph_assert_always(!out_exit_dispatching); + ceph_assert_always(!in_exit_dispatching); + return true; + } else { + return false; + } + } + + static shard_states_ref_t create( + seastar::shard_id sid, io_state_t state) { + return std::make_unique(sid, state); + } + + static shard_states_ref_t create_from_previous( + shard_states_t &prv_states, seastar::shard_id new_sid); + + private: + const seastar::shard_id sid; + io_state_t io_state; + + crimson::common::Gated gate; + seastar::promise<> pr_io_state_changed; + bool out_dispatching = false; + std::optional> out_exit_dispatching; + std::optional> in_exit_dispatching; + }; + + void do_set_io_state( + io_state_t new_state, + std::optional cc_seq = std::nullopt, + FrameAssemblerV2Ref fa = nullptr, + bool set_notify_out = false); + + io_state_t get_io_state() const { + return shard_states->get_io_state(); } - void dispatch_accept(); + void do_requeue_out_sent(); - void dispatch_connect(); + void do_requeue_out_sent_up_to(seq_num_t seq); + + void assign_frame_assembler(FrameAssemblerV2Ref); + + seastar::future<> send_redirected(MessageFRef msg); + + seastar::future<> do_send(MessageFRef msg); + + seastar::future<> send_keepalive_redirected(); + + seastar::future<> do_send_keepalive(); + + seastar::future<> to_new_sid( + crosscore_t::seq_t cc_seq, + seastar::shard_id new_sid, + ConnectionFRef, + std::optional is_replace); - private: void dispatch_reset(bool is_replace); void dispatch_remote_reset(); @@ -163,26 +465,58 @@ public: next_keepalive_ack.has_value()); } + bool has_out_sent() const { + return !out_sent_msgs.empty(); + } + + void reset_in(); + void reset_out(); - seastar::future try_exit_out_dispatch(); + void discard_out_sent(); - seastar::future<> do_out_dispatch(); + seastar::future<> do_out_dispatch(shard_states_t &ctx); - ceph::bufferlist sweep_out_pending_msgs_to_sent( +#ifdef UNIT_TESTS_BUILT + struct sweep_ret { + ceph::bufferlist bl; + std::vector tags; + }; + sweep_ret +#else + ceph::bufferlist +#endif + sweep_out_pending_msgs_to_sent( bool require_keepalive, std::optional maybe_keepalive_ack, bool require_ack); + void maybe_notify_out_dispatch(); + void notify_out_dispatch(); void ack_out_sent(seq_num_t seq); - seastar::future<> read_message(utime_t throttle_stamp, std::size_t msg_size); + seastar::future<> read_message( + shard_states_t &ctx, + utime_t throttle_stamp, + std::size_t msg_size); void do_in_dispatch(); + seastar::future<> cleanup_prv_shard(seastar::shard_id prv_sid); + private: + shard_states_ref_t shard_states; + + crosscore_t crosscore; + + // drop was happening in the previous sid + std::optional maybe_dropped_sid; + + // the remaining states in the previous sid for cleanup, see to_new_sid() + shard_states_ref_t maybe_prv_shard_states; + ChainedDispatchers &dispatchers; SocketConnection &conn; @@ -192,35 +526,24 @@ private: 
HandshakeListener *handshake_listener = nullptr; - crimson::common::Gated gate; - FrameAssemblerV2Ref frame_assembler; bool protocol_is_connected = false; bool need_dispatch_reset = true; - io_state_t io_state = io_state_t::none; - - // wait until current io_state changed - seastar::promise<> io_state_changed; - /* * out states for writing */ - bool out_dispatching = false; - - std::optional> out_exit_dispatching; - /// the seq num of the last transmitted message seq_num_t out_seq = 0; // messages to be resent after connection gets reset - std::deque out_pending_msgs; + std::deque out_pending_msgs; // messages sent, but not yet acked by peer - std::deque out_sent_msgs; + std::deque out_sent_msgs; bool need_keepalive = false; @@ -228,12 +551,12 @@ private: uint64_t ack_left = 0; + bool need_notify_out = false; + /* * in states for reading */ - std::optional> in_exit_dispatching; - /// the seq num of the last received message seq_num_t in_seq = 0; @@ -250,6 +573,23 @@ inline std::ostream& operator<<( } // namespace crimson::net +template <> +struct fmt::formatter { + constexpr auto parse(format_parse_context& ctx) { + return ctx.begin(); + } + + template + auto format(crimson::net::io_handler_state state, FormatContext& ctx) { + return fmt::format_to( + ctx.out(), + "io(in_seq={}, is_out_queued={}, has_out_sent={})", + state.in_seq, + state.is_out_queued, + state.has_out_sent); + } +}; + template <> struct fmt::formatter : fmt::formatter { @@ -270,6 +610,9 @@ struct fmt::formatter case drop: name = "drop"; break; + case switched: + name = "switched"; + break; } return formatter::format(name, ctx); } diff --git a/ceph/src/crimson/os/futurized_store.h b/ceph/src/crimson/os/futurized_store.h index 1c65f5b2f..783cd7485 100644 --- a/ceph/src/crimson/os/futurized_store.h +++ b/ceph/src/crimson/os/futurized_store.h @@ -25,8 +25,6 @@ class Transaction; namespace crimson::os { class FuturizedCollection; -constexpr core_id_t PRIMARY_CORE = 0; - class FuturizedStore { public: class Shard { diff --git a/ceph/src/crimson/os/seastore/CMakeLists.txt b/ceph/src/crimson/os/seastore/CMakeLists.txt index 5b1c6187c..4bdbab8c4 100644 --- a/ceph/src/crimson/os/seastore/CMakeLists.txt +++ b/ceph/src/crimson/os/seastore/CMakeLists.txt @@ -51,6 +51,7 @@ set(crimson_seastore_srcs journal.cc device.cc segment_manager_group.cc + record_scanner.cc journal/circular_bounded_journal.cc ../../../test/crimson/seastore/test_block.cc ${PROJECT_SOURCE_DIR}/src/os/Transaction.cc @@ -61,7 +62,7 @@ CMAKE_DEPENDENT_OPTION(WITH_ZNS "enable Linux ZNS support" OFF if(WITH_ZNS) find_package(LinuxZNS REQUIRED) list(APPEND crimson_seastore_srcs - segment_manager/zns.cc) + segment_manager/zbd.cc) endif() add_library(crimson-seastore STATIC diff --git a/ceph/src/crimson/os/seastore/async_cleaner.cc b/ceph/src/crimson/os/seastore/async_cleaner.cc index 84677747b..d7e398f5f 100644 --- a/ceph/src/crimson/os/seastore/async_cleaner.cc +++ b/ceph/src/crimson/os/seastore/async_cleaner.cc @@ -986,7 +986,7 @@ segment_id_t SegmentCleaner::allocate_segment( ERROR("out of space with {} {} {} {}", type, segment_seq_printer_t{seq}, category, rewrite_gen_printer_t{generation}); - ceph_abort(); + ceph_abort("seastore device size setting is too small"); return NULL_SEG_ID; } @@ -1462,6 +1462,7 @@ void SegmentCleaner::mark_space_used( { LOG_PREFIX(SegmentCleaner::mark_space_used); assert(background_callback->get_state() >= state_t::SCAN_SPACE); + assert(len); // TODO: drop if (addr.get_addr_type() != paddr_types_t::SEGMENT) { return; @@ -1492,6 +1493,7 @@ 
void SegmentCleaner::mark_space_free( { LOG_PREFIX(SegmentCleaner::mark_space_free); assert(background_callback->get_state() >= state_t::SCAN_SPACE); + assert(len); // TODO: drop if (addr.get_addr_type() != paddr_types_t::SEGMENT) { return; diff --git a/ceph/src/crimson/os/seastore/backref/btree_backref_manager.cc b/ceph/src/crimson/os/seastore/backref/btree_backref_manager.cc index ce3f737b2..30ff45540 100644 --- a/ceph/src/crimson/os/seastore/backref/btree_backref_manager.cc +++ b/ceph/src/crimson/os/seastore/backref/btree_backref_manager.cc @@ -40,14 +40,14 @@ const get_phy_tree_root_node_ret get_phy_tree_root_node< } else { return {false, trans_intr::make_interruptible( - seastar::make_ready_future< - CachedExtentRef>(CachedExtentRef()))}; + Cache::get_extent_ertr::make_ready_future< + CachedExtentRef>())}; } } else { return {false, trans_intr::make_interruptible( - seastar::make_ready_future< - CachedExtentRef>(CachedExtentRef()))}; + Cache::get_extent_ertr::make_ready_future< + CachedExtentRef>())}; } } @@ -114,10 +114,9 @@ BtreeBackrefManager::get_mapping( } else { TRACET("{} got {}, {}", c.trans, offset, iter.get_key(), iter.get_val()); - auto e = iter.get_pin(c); return get_mapping_ret( interruptible::ready_future_marker{}, - std::move(e)); + iter.get_pin(c)); } }); }); @@ -151,7 +150,7 @@ BtreeBackrefManager::get_mappings( TRACET("{}~{} got {}, {}, repeat ...", c.trans, offset, end, pos.get_key(), pos.get_val()); ceph_assert((pos.get_key().add_offset(pos.get_val().len)) > offset); - ret.push_back(pos.get_pin(c)); + ret.emplace_back(pos.get_pin(c)); return BackrefBtree::iterate_repeat_ret_inner( interruptible::ready_future_marker{}, seastar::stop_iteration::no); @@ -248,7 +247,8 @@ BtreeBackrefManager::new_mapping( }); }); }).si_then([c](auto &&state) { - return state.ret->get_pin(c); + return new_mapping_iertr::make_ready_future( + state.ret->get_pin(c)); }); } @@ -332,17 +332,6 @@ BtreeBackrefManager::merge_cached_backrefs( }); } -BtreeBackrefManager::check_child_trackers_ret -BtreeBackrefManager::check_child_trackers( - Transaction &t) { - auto c = get_context(t); - return with_btree( - cache, c, - [c](auto &btree) { - return btree.check_child_trackers(c); - }); -} - BtreeBackrefManager::scan_mapped_space_ret BtreeBackrefManager::scan_mapped_space( Transaction &t, @@ -397,34 +386,37 @@ BtreeBackrefManager::scan_mapped_space( ); }).si_then([this, &scan_visitor, c, FNAME, block_size] { // traverse alloc-deltas in order - auto &backref_entry_mset = cache.get_backref_entry_mset(); - DEBUGT("scan {} backref entries", c.trans, backref_entry_mset.size()); - for (auto &backref_entry : backref_entry_mset) { - if (backref_entry.laddr == L_ADDR_NULL) { - TRACET("backref entry {}~{} {} free", - c.trans, - backref_entry.paddr, - backref_entry.len, - backref_entry.type); - } else { - TRACET("backref entry {}~{} {}~{} {} used", - c.trans, - backref_entry.paddr, - backref_entry.len, - backref_entry.laddr, - backref_entry.len, - backref_entry.type); - } - ceph_assert(backref_entry.paddr.is_absolute()); - ceph_assert(backref_entry.len > 0 && - backref_entry.len % block_size == 0); - ceph_assert(!is_backref_node(backref_entry.type)); - scan_visitor( - backref_entry.paddr, + auto &backref_entryrefs = cache.get_backref_entryrefs_by_seq(); + for (auto &[seq, refs] : backref_entryrefs) { + boost::ignore_unused(seq); + DEBUGT("scan {} backref entries", c.trans, refs.size()); + for (auto &backref_entry : refs) { + if (backref_entry->laddr == L_ADDR_NULL) { + TRACET("backref entry {}~{} {} free", + 
c.trans, + backref_entry->paddr, + backref_entry->len, + backref_entry->type); + } else { + TRACET("backref entry {}~{} {}~{} {} used", + c.trans, + backref_entry->paddr, + backref_entry->len, + backref_entry->laddr, + backref_entry->len, + backref_entry->type); + } + ceph_assert(backref_entry->paddr.is_absolute()); + ceph_assert(backref_entry->len > 0 && + backref_entry->len % block_size == 0); + ceph_assert(!is_backref_node(backref_entry->type)); + scan_visitor( + backref_entry->paddr, P_ADDR_NULL, - backref_entry.len, - backref_entry.type, - backref_entry.laddr); + backref_entry->len, + backref_entry->type, + backref_entry->laddr); + } } }).si_then([this, &scan_visitor, block_size, c, FNAME] { BackrefBtree::mapped_space_visitor_t f = diff --git a/ceph/src/crimson/os/seastore/backref/btree_backref_manager.h b/ceph/src/crimson/os/seastore/backref/btree_backref_manager.h index e19d9ce7b..952e78b65 100644 --- a/ceph/src/crimson/os/seastore/backref/btree_backref_manager.h +++ b/ceph/src/crimson/os/seastore/backref/btree_backref_manager.h @@ -34,6 +34,13 @@ public: extent_types_t get_type() const final { return type; } + +protected: + std::unique_ptr> _duplicate( + op_context_t ctx) const final { + return std::unique_ptr>( + new BtreeBackrefMapping(ctx)); + } }; using BackrefBtree = FixedKVBtree< @@ -75,8 +82,6 @@ public: Transaction &t, paddr_t offset) final; - check_child_trackers_ret check_child_trackers(Transaction &t) final; - scan_mapped_space_ret scan_mapped_space( Transaction &t, scan_mapped_space_func_t &&f) final; diff --git a/ceph/src/crimson/os/seastore/backref_manager.h b/ceph/src/crimson/os/seastore/backref_manager.h index 4a354bdca..3feedb997 100644 --- a/ceph/src/crimson/os/seastore/backref_manager.h +++ b/ceph/src/crimson/os/seastore/backref_manager.h @@ -127,9 +127,6 @@ public: Transaction &t, paddr_t offset) = 0; - using check_child_trackers_ret = base_iertr::future<>; - virtual check_child_trackers_ret check_child_trackers(Transaction &t) = 0; - /** * scan all extents in both tree and cache, * including backref extents, logical extents and lba extents, diff --git a/ceph/src/crimson/os/seastore/btree/btree_range_pin.h b/ceph/src/crimson/os/seastore/btree/btree_range_pin.h index fef89197f..68188e9ff 100644 --- a/ceph/src/crimson/os/seastore/btree/btree_range_pin.h +++ b/ceph/src/crimson/os/seastore/btree/btree_range_pin.h @@ -21,21 +21,6 @@ struct op_context_t { constexpr uint16_t MAX_FIXEDKVBTREE_DEPTH = 8; -template -struct min_max_t {}; - -template <> -struct min_max_t { - static constexpr laddr_t max = L_ADDR_MAX; - static constexpr laddr_t min = L_ADDR_MIN; -}; - -template <> -struct min_max_t { - static constexpr paddr_t max = P_ADDR_MAX; - static constexpr paddr_t min = P_ADDR_MIN; -}; - template struct fixed_kv_node_meta_t { bound_t begin = min_max_t::min; @@ -117,7 +102,7 @@ struct fixed_kv_node_meta_le_t { template class BtreeNodeMapping : public PhysicalNodeMapping { - +protected: op_context_t ctx; /** * parent @@ -127,11 +112,16 @@ class BtreeNodeMapping : public PhysicalNodeMapping { */ CachedExtentRef parent; - val_t value; - extent_len_t len; + pladdr_t value; + extent_len_t len = 0; fixed_kv_node_meta_t range; uint16_t pos = std::numeric_limits::max(); + virtual std::unique_ptr _duplicate(op_context_t) const = 0; + fixed_kv_node_meta_t _get_pin_range() const { + return range; + } + public: using val_type = val_t; BtreeNodeMapping(op_context_t ctx) : ctx(ctx) {} @@ -140,14 +130,14 @@ public: op_context_t ctx, CachedExtentRef parent, uint16_t pos, - val_t 
&value, + pladdr_t value, extent_len_t len, - fixed_kv_node_meta_t &&meta) + fixed_kv_node_meta_t meta) : ctx(ctx), parent(parent), value(value), len(len), - range(std::move(meta)), + range(meta), pos(pos) { if (!parent->is_pending()) { @@ -182,16 +172,20 @@ public: } val_t get_val() const final { - return value; + if constexpr (std::is_same_v) { + return value.get_paddr(); + } else { + static_assert(std::is_same_v); + return value.get_laddr(); + } } - key_t get_key() const final { + key_t get_key() const override { return range.begin; } PhysicalNodeMappingRef duplicate() const final { - auto ret = std::unique_ptr>( - new BtreeNodeMapping(ctx)); + auto ret = _duplicate(ctx); ret->range = range; ret->value = value; ret->parent = parent; diff --git a/ceph/src/crimson/os/seastore/btree/fixed_kv_btree.h b/ceph/src/crimson/os/seastore/btree/fixed_kv_btree.h index 2aaf1620f..2970d0440 100644 --- a/ceph/src/crimson/os/seastore/btree/fixed_kv_btree.h +++ b/ceph/src/crimson/os/seastore/btree/fixed_kv_btree.h @@ -28,10 +28,12 @@ bool is_valid_child_ptr(ChildableCachedExtent* child); template phy_tree_root_t& get_phy_tree_root(root_t& r); +using get_child_iertr = + ::crimson::interruptible::interruptible_errorator< + typename trans_intr::condition, + get_child_ertr>; using get_phy_tree_root_node_ret = - std::pair>; + std::pair>; template const get_phy_tree_root_node_ret get_phy_tree_root_node( @@ -195,7 +197,10 @@ public: if constexpr ( std::is_same_v) { - ret.paddr = ret.paddr.maybe_relative_to(leaf.node->get_paddr()); + if (ret.pladdr.is_paddr()) { + ret.pladdr = ret.pladdr.get_paddr().maybe_relative_to( + leaf.node->get_paddr()); + } } return ret; } @@ -213,8 +218,7 @@ public: return leaf.pos == 0; } - PhysicalNodeMappingRef - get_pin(op_context_t ctx) const { + std::unique_ptr get_pin(op_context_t ctx) const { assert(!is_end()); auto val = get_val(); auto key = get_key(); @@ -485,11 +489,13 @@ public: return upper_bound(c, min_max_t::max); } - template + template ::type = 0> void check_node( op_context_t c, TCachedExtentRef node) { + assert(leaf_has_children); for (auto i : *node) { CachedExtentRef child_node; Transaction::get_extent_ret ret; @@ -499,35 +505,13 @@ public: i->get_val().maybe_relative_to(node->get_paddr()), &child_node); } else { - if constexpr (leaf_has_children) { - ret = c.trans.get_extent( - i->get_val().paddr.maybe_relative_to(node->get_paddr()), - &child_node); - } + assert(i->get_val().pladdr.is_paddr()); + ret = c.trans.get_extent( + i->get_val().pladdr.get_paddr().maybe_relative_to(node->get_paddr()), + &child_node); } if (ret == Transaction::get_extent_ret::PRESENT) { - if (child_node->is_mutation_pending()) { - auto &prior = (child_node_t &)*child_node->prior_instance; - assert(prior.is_valid()); - assert(prior.is_parent_valid()); - if (node->is_mutation_pending()) { - auto &n = node->get_stable_for_key(i->get_key()); - assert(prior.get_parent_node().get() == &n); - auto pos = n.lower_bound_offset(i->get_key()); - assert(pos < n.get_node_size()); - assert(n.children[pos] == &prior); - } else { - assert(prior.get_parent_node().get() == node.get()); - assert(node->children[i->get_offset()] == &prior); - } - } else if (child_node->is_initial_pending()) { - auto cnode = child_node->template cast(); - auto pos = node->find(i->get_key()).get_offset(); - auto child = node->children[pos]; - assert(child); - assert(child == cnode.get()); - assert(cnode->is_parent_valid()); - } else { + if (child_node->is_stable()) { assert(child_node->is_valid()); auto cnode = 
child_node->template cast(); assert(cnode->has_parent_tracker()); @@ -541,6 +525,32 @@ public: assert(cnode->get_parent_node().get() == node.get()); assert(node->children[i->get_offset()] == cnode.get()); } + } else if (child_node->is_pending()) { + if (child_node->is_mutation_pending()) { + auto &prior = (child_node_t &)*child_node->prior_instance; + assert(prior.is_valid()); + assert(prior.is_parent_valid()); + if (node->is_mutation_pending()) { + auto &n = node->get_stable_for_key(i->get_key()); + assert(prior.get_parent_node().get() == &n); + auto pos = n.lower_bound_offset(i->get_key()); + assert(pos < n.get_node_size()); + assert(n.children[pos] == &prior); + } else { + assert(prior.get_parent_node().get() == node.get()); + assert(node->children[i->get_offset()] == &prior); + } + } else { + auto cnode = child_node->template cast(); + auto pos = node->find(i->get_key()).get_offset(); + auto child = node->children[pos]; + assert(child); + assert(child == cnode.get()); + assert(cnode->is_parent_valid()); + } + } else { + ceph_assert(!child_node->is_valid()); + ceph_abort("impossible"); } } else if (ret == Transaction::get_extent_ret::ABSENT) { ChildableCachedExtent* child = nullptr; @@ -570,7 +580,10 @@ public: assert(!c.cache.query_cache(i->get_val(), nullptr)); } else { if constexpr (leaf_has_children) { - assert(!c.cache.query_cache(i->get_val().paddr, nullptr)); + assert(i->get_val().pladdr.is_paddr() + ? (bool)!c.cache.query_cache( + i->get_val().pladdr.get_paddr(), nullptr) + : true); } } } @@ -581,6 +594,8 @@ public: } using check_child_trackers_ret = base_iertr::future<>; + template ::type = 0> check_child_trackers_ret check_child_trackers( op_context_t c) { mapped_space_visitor_t checker = [c, this]( @@ -1395,7 +1410,7 @@ private: }; if (found) { - return fut.then_interruptible( + return fut.si_then( [this, c, on_found_internal=std::move(on_found_internal), on_found_leaf=std::move(on_found_leaf)](auto root) { LOG_PREFIX(FixedKVBtree::lookup_root); @@ -1474,7 +1489,7 @@ private: auto v = parent->template get_child(c, node_iter); if (v.has_child()) { - return v.get_child_fut().then( + return v.get_child_fut().safe_then( [on_found=std::move(on_found), node_iter, c, parent_entry](auto child) mutable { LOG_PREFIX(FixedKVBtree::lookup_internal_level); @@ -1542,7 +1557,7 @@ private: auto v = parent->template get_child(c, node_iter); if (v.has_child()) { - return v.get_child_fut().then( + return v.get_child_fut().safe_then( [on_found=std::move(on_found), node_iter, c, parent_entry](auto child) mutable { LOG_PREFIX(FixedKVBtree::lookup_leaf); @@ -2039,7 +2054,7 @@ private: pos.node = replacement; if (donor_is_left) { - pos.pos += r->get_size(); + pos.pos += l->get_size(); parent_pos.pos--; } @@ -2095,7 +2110,7 @@ private: auto v = parent_pos.node->template get_child(c, donor_iter); if (v.has_child()) { - return v.get_child_fut().then( + return v.get_child_fut().safe_then( [do_merge=std::move(do_merge), &pos, donor_iter, donor_is_left, c, parent_pos](auto child) mutable { LOG_PREFIX(FixedKVBtree::merge_level); diff --git a/ceph/src/crimson/os/seastore/btree/fixed_kv_node.h b/ceph/src/crimson/os/seastore/btree/fixed_kv_node.h index fe5052824..956a1824e 100644 --- a/ceph/src/crimson/os/seastore/btree/fixed_kv_node.h +++ b/ceph/src/crimson/os/seastore/btree/fixed_kv_node.h @@ -564,7 +564,7 @@ struct FixedKVInternalNode return this->get_split_pivot().get_offset(); } - void prepare_write() final { + void prepare_commit() final { if (this->is_initial_pending()) { if (this->is_rewrite()) { 
this->set_children_from_prior_instance(); @@ -1004,7 +1004,7 @@ struct FixedKVLeafNode } } - void prepare_write() final { + void prepare_commit() final { if constexpr (has_children) { if (this->is_initial_pending()) { if (this->is_rewrite()) { diff --git a/ceph/src/crimson/os/seastore/cache.cc b/ceph/src/crimson/os/seastore/cache.cc index d6c9fdce3..4d1dc9296 100644 --- a/ceph/src/crimson/os/seastore/cache.cc +++ b/ceph/src/crimson/os/seastore/cache.cc @@ -85,12 +85,6 @@ Cache::retire_extent_ret Cache::retire_extent_addr( ext = query_cache(addr, nullptr); if (ext) { DEBUGT("retire {}~{} in cache -- {}", t, addr, length, *ext); - if (ext->get_type() != extent_types_t::RETIRED_PLACEHOLDER) { - t.add_to_read_set(ext); - t.add_to_retired_set(ext); - return retire_extent_iertr::now(); - } - // the retired-placeholder exists } else { // add a new placeholder to Cache ext = CachedExtent::make_cached_extent_ref< @@ -105,8 +99,6 @@ Cache::retire_extent_ret Cache::retire_extent_addr( const auto t_src = t.get_src(); add_extent(ext, &t_src); } - - // add the retired-placeholder to transaction t.add_to_read_set(ext); t.add_to_retired_set(ext); return retire_extent_iertr::now(); @@ -225,9 +217,9 @@ void Cache::register_metrics() "cache", { sm::make_counter( - "trans_invalidated", + "trans_invalidated_by_extent", counter, - sm::description("total number of transaction invalidated"), + sm::description("total number of transactions invalidated by extents"), {src_label, ext_label} ), } @@ -295,6 +287,12 @@ void Cache::register_metrics() metrics.add_group( "cache", { + sm::make_counter( + "trans_invalidated", + efforts.total_trans_invalidated, + sm::description("total number of transactions invalidated"), + {src_label} + ), sm::make_counter( "invalidated_delta_bytes", efforts.mutate_delta_bytes, @@ -782,7 +780,7 @@ void Cache::commit_replace_extent( extents.replace(*next, *prev); if (prev->get_type() == extent_types_t::ROOT) { - assert(prev->is_clean() + assert(prev->is_stable_clean() || prev->primary_ref_list_hook.is_linked()); if (prev->is_dirty()) { stats.dirty_bytes -= prev->get_length(); @@ -843,6 +841,7 @@ void Cache::mark_transaction_conflicted( auto& efforts = get_by_src(stats.invalidated_efforts_by_src, t.get_src()); + ++efforts.total_trans_invalidated; auto& counter = get_by_ext(efforts.num_trans_invalidated, conflicting_extent.get_type()); @@ -1003,6 +1002,8 @@ CachedExtentRef Cache::duplicate_for_write( Transaction &t, CachedExtentRef i) { LOG_PREFIX(Cache::duplicate_for_write); + assert(i->is_fully_loaded()); + if (i->is_mutable()) return i; @@ -1010,6 +1011,12 @@ CachedExtentRef Cache::duplicate_for_write( i->version++; i->state = CachedExtent::extent_state_t::EXIST_MUTATION_PENDING; i->last_committed_crc = i->get_crc32c(); + // deepcopy the buffer of exist clean extent beacuse it shares + // buffer with original clean extent. 
+ auto bp = i->get_bptr(); + auto nbp = ceph::bufferptr(bp.c_str(), bp.length()); + i->set_bptr(std::move(nbp)); + t.add_mutated_extent(i); DEBUGT("duplicate existing extent {}", t, *i); return i; @@ -1098,6 +1105,7 @@ record_t Cache::prepare_record( i->prepare_write(); i->set_io_wait(); + i->prepare_commit(); assert(i->get_version() > 0); auto final_crc = i->get_crc32c(); @@ -1200,6 +1208,7 @@ record_t Cache::prepare_record( bufferlist bl; i->prepare_write(); + i->prepare_commit(); bl.append(i->get_bptr()); if (i->get_type() == extent_types_t::ROOT) { ceph_assert(0 == "ROOT never gets written as a fresh block"); @@ -1240,6 +1249,7 @@ record_t Cache::prepare_record( assert(!i->is_inline()); get_by_ext(efforts.fresh_ool_by_ext, i->get_type()).increment(i->get_length()); + i->prepare_commit(); if (is_backref_mapped_extent_node(i)) { alloc_delta.alloc_blk_ranges.emplace_back( i->get_paddr(), @@ -1835,6 +1845,8 @@ Cache::get_next_dirty_extents_ret Cache::get_next_dirty_extents( i != dirty.end() && bytes_so_far < max_bytes; ++i) { auto dirty_from = i->get_dirty_from(); + //dirty extents must be fully loaded + assert(i->is_fully_loaded()); if (unlikely(dirty_from == JOURNAL_SEQ_NULL)) { ERRORT("got dirty extent with JOURNAL_SEQ_NULL -- {}", t, *i); ceph_abort(); diff --git a/ceph/src/crimson/os/seastore/cache.h b/ceph/src/crimson/os/seastore/cache.h index 9289dda08..c79473f98 100644 --- a/ceph/src/crimson/os/seastore/cache.h +++ b/ceph/src/crimson/os/seastore/cache.h @@ -332,6 +332,16 @@ public: extent_init_func(*ret); return read_extent( std::move(ret)); + } else if (!cached->is_fully_loaded()) { + auto ret = TCachedExtentRef(static_cast(cached.get())); + on_cache(*ret); + SUBDEBUG(seastore_cache, + "{} {}~{} is present without been fully loaded, reading ... -- {}", + T::TYPE, offset, length, *ret); + auto bp = alloc_cache_buf(length); + ret->set_bptr(std::move(bp)); + return read_extent( + std::move(ret)); } else { SUBTRACE(seastore_cache, "{} {}~{} is present in cache -- {}", @@ -377,31 +387,43 @@ public: auto result = t.get_extent(offset, &ret); if (result == Transaction::get_extent_ret::RETIRED) { SUBDEBUGT(seastore_cache, "{} {} is retired on t -- {}", - t, type, offset, *ret); + t, type, offset, *ret); return get_extent_if_cached_iertr::make_ready_future< CachedExtentRef>(ret); } else if (result == Transaction::get_extent_ret::PRESENT) { - SUBTRACET(seastore_cache, "{} {} is present on t -- {}", - t, type, offset, *ret); - return ret->wait_io().then([ret] { - return get_extent_if_cached_iertr::make_ready_future< - CachedExtentRef>(ret); - }); + if (ret->is_fully_loaded()) { + SUBTRACET(seastore_cache, "{} {} is present on t -- {}", + t, type, offset, *ret); + return ret->wait_io().then([ret] { + return get_extent_if_cached_iertr::make_ready_future< + CachedExtentRef>(ret); + }); + } else { + SUBDEBUGT(seastore_cache, "{} {} is present on t -- {}" + " without being fully loaded", t, type, offset, *ret); + return get_extent_if_cached_iertr::make_ready_future< + CachedExtentRef>(); + } } // get_extent_ret::ABSENT from transaction auto metric_key = std::make_pair(t.get_src(), type); ret = query_cache(offset, &metric_key); - if (!ret || - // retired_placeholder is not really cached yet - ret->get_type() == extent_types_t::RETIRED_PLACEHOLDER) { - SUBDEBUGT(seastore_cache, "{} {} is absent{}", - t, type, offset, !!ret ? 
"(placeholder)" : ""); - return get_extent_if_cached_iertr::make_ready_future< - CachedExtentRef>(); + if (!ret) { + SUBDEBUGT(seastore_cache, "{} {} is absent", t, type, offset); + return get_extent_if_cached_iertr::make_ready_future(); + } else if (ret->get_type() == extent_types_t::RETIRED_PLACEHOLDER) { + // retired_placeholder is not really cached yet + SUBDEBUGT(seastore_cache, "{} {} is absent(placeholder)", + t, type, offset); + return get_extent_if_cached_iertr::make_ready_future(); + } else if (!ret->is_fully_loaded()) { + SUBDEBUGT(seastore_cache, "{} {} is present without " + "being fully loaded", t, type, offset); + return get_extent_if_cached_iertr::make_ready_future(); } - // present in cache and is not a retired_placeholder + // present in cache(fully loaded) and is not a retired_placeholder SUBDEBUGT(seastore_cache, "{} {} is present in cache -- {}", t, type, offset, *ret); t.add_to_read_set(ret); @@ -432,33 +454,42 @@ public: CachedExtentRef ret; LOG_PREFIX(Cache::get_extent); auto result = t.get_extent(offset, &ret); - if (result != Transaction::get_extent_ret::ABSENT) { - SUBTRACET(seastore_cache, "{} {}~{} is {} on t -- {}", - t, - T::TYPE, - offset, - length, - result == Transaction::get_extent_ret::PRESENT ? "present" : "retired", - *ret); - assert(result != Transaction::get_extent_ret::RETIRED); - return ret->wait_io().then([ret] { - return seastar::make_ready_future>( - ret->cast()); - }); + if (result == Transaction::get_extent_ret::RETIRED) { + SUBERRORT(seastore_cache, "{} {}~{} is retired on t -- {}", + t, T::TYPE, offset, length, *ret); + ceph_abort("impossible"); + } else if (result == Transaction::get_extent_ret::PRESENT) { + if (ret->is_fully_loaded()) { + SUBTRACET(seastore_cache, "{} {}~{} is present on t -- {}", + t, T::TYPE, offset, length, *ret); + return ret->wait_io().then([ret] { + return seastar::make_ready_future>( + ret->cast()); + }); + } else { + assert(!ret->is_mutable()); + touch_extent(*ret); + SUBDEBUGT(seastore_cache, "{} {}~{} is present on t without been \ + fully loaded, reading ... 
{}", t, T::TYPE, offset, length, *ret); + auto bp = alloc_cache_buf(ret->get_length()); + ret->set_bptr(std::move(bp)); + return read_extent( + ret->cast()); + } + } else { + SUBTRACET(seastore_cache, "{} {}~{} is absent on t, query cache ...", + t, T::TYPE, offset, length); + auto f = [&t, this](CachedExtent &ext) { + t.add_to_read_set(CachedExtentRef(&ext)); + touch_extent(ext); + }; + auto metric_key = std::make_pair(t.get_src(), T::TYPE); + return trans_intr::make_interruptible( + get_extent( + offset, length, &metric_key, + std::forward(extent_init_func), std::move(f)) + ); } - - SUBTRACET(seastore_cache, "{} {}~{} is absent on t, query cache ...", - t, T::TYPE, offset, length); - auto f = [&t, this](CachedExtent &ext) { - t.add_to_read_set(CachedExtentRef(&ext)); - touch_extent(ext); - }; - auto metric_key = std::make_pair(t.get_src(), T::TYPE); - return trans_intr::make_interruptible( - get_extent( - offset, length, &metric_key, - std::forward(extent_init_func), std::move(f)) - ); } /* @@ -475,7 +506,7 @@ public: extent_len_t length, Func &&extent_init_func) { CachedExtentRef ret; - LOG_PREFIX(Cache::get_extent); + LOG_PREFIX(Cache::get_absent_extent); #ifndef NDEBUG auto r = t.get_extent(offset, &ret); @@ -522,7 +553,7 @@ public: return get_absent_extent(t, offset, length, [](T &){}); } - seastar::future get_extent_viewable_by_trans( + get_extent_ertr::future get_extent_viewable_by_trans( Transaction &t, CachedExtentRef extent) { @@ -533,19 +564,34 @@ public: touch_extent(*p_extent); } } + // user should not see RETIRED_PLACEHOLDER extents + ceph_assert(p_extent->get_type() != extent_types_t::RETIRED_PLACEHOLDER); + if (!p_extent->is_fully_loaded()) { + assert(!p_extent->is_mutable()); + touch_extent(*p_extent); + LOG_PREFIX(Cache::get_extent_viewable_by_trans); + SUBDEBUG(seastore_cache, + "{} {}~{} is present without been fully loaded, reading ... 
-- {}", + p_extent->get_type(), p_extent->get_paddr(),p_extent->get_length(), + *p_extent); + auto bp = alloc_cache_buf(p_extent->get_length()); + p_extent->set_bptr(std::move(bp)); + return read_extent(CachedExtentRef(p_extent)); + } return p_extent->wait_io( ).then([p_extent] { - return CachedExtentRef(p_extent); + return get_extent_ertr::make_ready_future( + CachedExtentRef(p_extent)); }); } template - seastar::future> get_extent_viewable_by_trans( + get_extent_ertr::future> get_extent_viewable_by_trans( Transaction &t, TCachedExtentRef extent) { return get_extent_viewable_by_trans(t, CachedExtentRef(extent.get()) - ).then([](auto p_extent) { + ).safe_then([](auto p_extent) { return p_extent->template cast(); }); } @@ -606,15 +652,26 @@ private: CachedExtentRef ret; auto status = t.get_extent(offset, &ret); if (status == Transaction::get_extent_ret::RETIRED) { - SUBDEBUGT(seastore_cache, "{} {}~{} {} is retired on t -- {}", + SUBERRORT(seastore_cache, "{} {}~{} {} is retired on t -- {}", t, type, offset, length, laddr, *ret); - return seastar::make_ready_future(); + ceph_abort("impossible"); } else if (status == Transaction::get_extent_ret::PRESENT) { - SUBTRACET(seastore_cache, "{} {}~{} {} is present on t -- {}", - t, type, offset, length, laddr, *ret); - return ret->wait_io().then([ret] { - return seastar::make_ready_future(ret); - }); + if (ret->is_fully_loaded()) { + SUBTRACET(seastore_cache, "{} {}~{} {} is present on t -- {}", + t, type, offset, length, laddr, *ret); + return ret->wait_io().then([ret] { + return seastar::make_ready_future(ret); + }); + } else { + assert(!ret->is_mutable()); + touch_extent(*ret); + SUBDEBUGT(seastore_cache, "{} {}~{} {} is present on t without been \ + fully loaded, reading ...", t, type, offset, length, laddr); + auto bp = alloc_cache_buf(ret->get_length()); + ret->set_bptr(std::move(bp)); + return read_extent( + std::move(ret)); + } } else { SUBTRACET(seastore_cache, "{} {}~{} {} is absent on t, query cache ...", t, type, offset, length, laddr); @@ -841,6 +898,48 @@ public: return ret; } + /** + * alloc_remapped_extent + * + * Allocates an EXIST_CLEAN extent. Use the buffer to fill the new extent + * if buffer exists. + */ + template + TCachedExtentRef alloc_remapped_extent( + Transaction &t, + laddr_t remap_laddr, + paddr_t remap_paddr, + extent_len_t remap_length, + laddr_t original_laddr, + std::optional &&original_bptr) { + LOG_PREFIX(Cache::alloc_remapped_extent); + assert(remap_laddr >= original_laddr); + TCachedExtentRef ext; + if (original_bptr.has_value()) { + // shallow copy the buffer from original extent + auto nbp = ceph::bufferptr( + *original_bptr, + remap_laddr - original_laddr, + remap_length); + // ExtentPlacementManager::alloc_new_extent will make a new + // (relative/temp) paddr, so make extent directly + ext = CachedExtent::make_cached_extent_ref(std::move(nbp)); + } else { + ext = CachedExtent::make_placeholder_cached_extent_ref(remap_length); + } + + ext->init(CachedExtent::extent_state_t::EXIST_CLEAN, + remap_paddr, + PLACEMENT_HINT_NULL, + NULL_GENERATION, + t.get_trans_id()); + + t.add_fresh_extent(ext); + SUBTRACET(seastore_cache, "allocated {} {}B, hint={}, has ptr? 
{} -- {}", + t, T::TYPE, remap_length, remap_laddr, original_bptr.has_value(), *ext); + return ext; + } + /** * alloc_new_extent * @@ -1185,7 +1284,7 @@ public: { if (p_src && is_background_transaction(*p_src)) return; - if (ext.is_clean() && !ext.is_placeholder()) { + if (ext.is_stable_clean() && !ext.is_placeholder()) { lru.move_to_top(ext); } } @@ -1265,7 +1364,7 @@ private: } void add_to_lru(CachedExtent &extent) { - assert(extent.is_clean() && !extent.is_placeholder()); + assert(extent.is_stable_clean() && !extent.is_placeholder()); if (!extent.primary_ref_list_hook.is_linked()) { contents += extent.get_length(); @@ -1291,7 +1390,7 @@ private: } void remove_from_lru(CachedExtent &extent) { - assert(extent.is_clean() && !extent.is_placeholder()); + assert(extent.is_stable_clean() && !extent.is_placeholder()); if (extent.primary_ref_list_hook.is_linked()) { lru.erase(lru.s_iterator_to(extent)); @@ -1302,7 +1401,7 @@ private: } void move_to_top(CachedExtent &extent) { - assert(extent.is_clean() && !extent.is_placeholder()); + assert(extent.is_stable_clean() && !extent.is_placeholder()); if (extent.primary_ref_list_hook.is_linked()) { lru.erase(lru.s_iterator_to(extent)); @@ -1342,6 +1441,7 @@ private: io_stat_t fresh; io_stat_t fresh_ool_written; counter_by_extent_t num_trans_invalidated; + uint64_t total_trans_invalidated = 0; uint64_t num_ool_records = 0; uint64_t ool_record_bytes = 0; }; @@ -1515,7 +1615,9 @@ private: get_extent_ret read_extent( TCachedExtentRef&& extent ) { - assert(extent->state == CachedExtent::extent_state_t::CLEAN_PENDING); + assert(extent->state == CachedExtent::extent_state_t::CLEAN_PENDING || + extent->state == CachedExtent::extent_state_t::EXIST_CLEAN || + extent->state == CachedExtent::extent_state_t::CLEAN); extent->set_io_wait(); return epm.read( extent->get_paddr(), @@ -1530,7 +1632,11 @@ private: extent->last_committed_crc = extent->get_crc32c(); extent->on_clean_read(); - } else { + } else if (extent->state == CachedExtent::extent_state_t::EXIST_CLEAN || + extent->state == CachedExtent::extent_state_t::CLEAN) { + /* TODO: crc should be checked against LBA manager */ + extent->last_committed_crc = extent->get_crc32c(); + } else { ceph_assert(!extent->is_valid()); } extent->complete_io(); diff --git a/ceph/src/crimson/os/seastore/cached_extent.h b/ceph/src/crimson/os/seastore/cached_extent.h index 464f34d79..02f8ae46c 100644 --- a/ceph/src/crimson/os/seastore/cached_extent.h +++ b/ceph/src/crimson/os/seastore/cached_extent.h @@ -250,6 +250,14 @@ public: */ virtual void prepare_write() {} + /** + * prepare_commit + * + * Called prior to committing the transaction in which this extent + * is living. 
+ */ + virtual void prepare_commit() {} + /** * on_initial_write * @@ -332,6 +340,7 @@ public: << ", last_committed_crc=" << last_committed_crc << ", refcount=" << use_count() << ", user_hint=" << user_hint + << ", fully_loaded=" << is_fully_loaded() << ", rewrite_gen=" << rewrite_gen_printer_t{rewrite_generation}; if (state != extent_state_t::INVALID && state != extent_state_t::CLEAN_PENDING) { @@ -407,6 +416,13 @@ public: return is_mutable() || state == extent_state_t::EXIST_CLEAN; } + /// Returns true if extent is stable and shared among transactions + bool is_stable() const { + return state == extent_state_t::CLEAN_PENDING || + state == extent_state_t::CLEAN || + state == extent_state_t::DIRTY; + } + /// Returns true if extent has a pending delta bool is_mutation_pending() const { return state == extent_state_t::MUTATION_PENDING; @@ -426,6 +442,13 @@ public: state == extent_state_t::EXIST_CLEAN; } + // Returs true if extent is stable and clean + bool is_stable_clean() const { + ceph_assert(is_valid()); + return state == extent_state_t::CLEAN || + state == extent_state_t::CLEAN_PENDING; + } + /// Ruturns true if data is persisted while metadata isn't bool is_exist_clean() const { return state == extent_state_t::EXIST_CLEAN; @@ -473,6 +496,12 @@ public: return dirty_from_or_retired_at; } + /// Return true if extent is fully loaded or is about to be fully loaded (call + /// wait_io() in this case) + bool is_fully_loaded() const { + return ptr.has_value(); + } + /** * get_paddr * @@ -481,8 +510,18 @@ public: */ paddr_t get_paddr() const { return poffset; } - /// Returns length of extent - virtual extent_len_t get_length() const { return ptr.length(); } + /// Returns length of extent data in disk + extent_len_t get_length() const { + return length; + } + + extent_len_t get_loaded_length() const { + if (ptr.has_value()) { + return ptr->length(); + } else { + return 0; + } + } /// Returns version, get_version() == 0 iff is_clean() extent_version_t get_version() const { @@ -498,8 +537,14 @@ public: } /// Get ref to raw buffer - bufferptr &get_bptr() { return ptr; } - const bufferptr &get_bptr() const { return ptr; } + bufferptr &get_bptr() { + assert(ptr.has_value()); + return *ptr; + } + const bufferptr &get_bptr() const { + assert(ptr.has_value()); + return *ptr; + } /// Compare by paddr friend bool operator< (const CachedExtent &a, const CachedExtent &b) { @@ -579,6 +624,11 @@ private: return extent_index_hook.is_linked(); } + /// set bufferptr + void set_bptr(ceph::bufferptr &&nptr) { + ptr = nptr; + } + /// Returns true if the extent part of the open transaction bool is_pending_in_trans(transaction_id_t id) const { return is_pending() && pending_for_transaction == id; @@ -602,8 +652,11 @@ private: */ journal_seq_t dirty_from_or_retired_at; - /// Actual data contents - ceph::bufferptr ptr; + /// cache data contents, std::nullopt if no data in cache + std::optional ptr; + + /// disk data length + extent_len_t length; /// number of deltas since initial write extent_version_t version = 0; @@ -649,24 +702,53 @@ protected: trans_view_set_t mutation_pendings; CachedExtent(CachedExtent &&other) = delete; - CachedExtent(ceph::bufferptr &&ptr) : ptr(std::move(ptr)) {} + CachedExtent(ceph::bufferptr &&_ptr) : ptr(std::move(_ptr)) { + length = ptr->length(); + assert(length > 0); + } + + /// construct new CachedExtent, will deep copy the buffer CachedExtent(const CachedExtent &other) : state(other.state), dirty_from_or_retired_at(other.dirty_from_or_retired_at), - ptr(other.ptr.c_str(), 
other.ptr.length()), + length(other.get_length()), version(other.version), - poffset(other.poffset) {} + poffset(other.poffset) { + assert((length % CEPH_PAGE_SIZE) == 0); + if (other.is_fully_loaded()) { + ptr.emplace(buffer::create_page_aligned(length)); + other.ptr->copy_out(0, length, ptr->c_str()); + } else { + // the extent must be fully loaded before CoW + assert(length == 0); // in case of root + } + } struct share_buffer_t {}; - CachedExtent(const CachedExtent &other, share_buffer_t) : - state(other.state), - dirty_from_or_retired_at(other.dirty_from_or_retired_at), - ptr(other.ptr), - version(other.version), - poffset(other.poffset) {} + /// construct new CachedExtent, will shallow copy the buffer + CachedExtent(const CachedExtent &other, share_buffer_t) + : state(other.state), + dirty_from_or_retired_at(other.dirty_from_or_retired_at), + ptr(other.ptr), + length(other.get_length()), + version(other.version), + poffset(other.poffset) {} + + // 0 length is only possible for the RootBlock + struct zero_length_t {}; + CachedExtent(zero_length_t) : ptr(ceph::bufferptr(0)), length(0) {}; struct retired_placeholder_t{}; - CachedExtent(retired_placeholder_t) : state(extent_state_t::INVALID) {} + CachedExtent(retired_placeholder_t, extent_len_t _length) + : state(extent_state_t::INVALID), + length(_length) { + assert(length > 0); + } + + /// no buffer extent, for lazy read + CachedExtent(extent_len_t _length) : length(_length) { + assert(length > 0); + } friend class Cache; template @@ -675,6 +757,12 @@ protected: return new T(std::forward(args)...); } + template + static TCachedExtentRef make_placeholder_cached_extent_ref( + extent_len_t length) { + return new T(length); + } + void reset_prior_instance() { prior_instance.reset(); } @@ -898,12 +986,14 @@ private: uint16_t pos = std::numeric_limits::max(); }; +using get_child_ertr = crimson::errorator< + crimson::ct_error::input_output_error>; template struct get_child_ret_t { - std::variant>> ret; + std::variant>> ret; get_child_ret_t(child_pos_t pos) : ret(std::move(pos)) {} - get_child_ret_t(seastar::future> child) + get_child_ret_t(get_child_ertr::future> child) : ret(std::move(child)) {} bool has_child() const { @@ -915,7 +1005,7 @@ struct get_child_ret_t { return std::get<0>(ret); } - seastar::future> &get_child_fut() { + get_child_ertr::future> &get_child_fut() { ceph_assert(ret.index() == 1); return std::get<1>(ret); } @@ -938,6 +1028,15 @@ public: virtual bool has_been_invalidated() const = 0; virtual CachedExtentRef get_parent() const = 0; virtual uint16_t get_pos() const = 0; + // An lba pin may be indirect, see comments in lba_manager/btree/btree_lba_manager.h + virtual bool is_indirect() const { return false; } + virtual key_t get_intermediate_key() const { return min_max_t::null; } + virtual key_t get_intermediate_base() const { return min_max_t::null; } + virtual extent_len_t get_intermediate_length() const { return 0; } + // The start offset of the pin, must be 0 if the pin is not indirect + virtual extent_len_t get_intermediate_offset() const { + return std::numeric_limits::max(); + } virtual get_child_ret_t get_logical_extent(Transaction &t) = 0; @@ -978,14 +1077,10 @@ using backref_pin_list_t = std::list; * the Cache interface boundary. 
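// A minimal sketch of the lazy-loading scheme above, with a plain std::string
// standing in for ceph::bufferptr: an extent always knows its on-disk length,
// but only holds a buffer once the data has actually been read, and callers
// probe is_fully_loaded() before touching it (as the Cache paths above do).
#include <cassert>
#include <iostream>
#include <optional>
#include <string>

class LazyExtent {
  std::optional<std::string> buf;  // nullopt until data is read from disk
  size_t length;                   // on-disk length, known up front

public:
  explicit LazyExtent(size_t len) : length(len) {}

  bool is_fully_loaded() const { return buf.has_value(); }
  size_t get_length() const { return length; }
  size_t get_loaded_length() const { return buf ? buf->size() : 0; }

  void load(std::string data) {         // what a cache read would fill in
    assert(data.size() == length);
    buf = std::move(data);
  }
  const std::string& get_buf() const {  // analogous to get_bptr()
    assert(buf.has_value());
    return *buf;
  }
};

int main() {
  LazyExtent e(4);
  std::cout << e.is_fully_loaded() << " " << e.get_loaded_length() << "\n";  // 0 0
  e.load("abcd");
  std::cout << e.is_fully_loaded() << " " << e.get_buf() << "\n";            // 1 abcd
}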
*/ class RetiredExtentPlaceholder : public CachedExtent { - extent_len_t length; public: RetiredExtentPlaceholder(extent_len_t length) - : CachedExtent(CachedExtent::retired_placeholder_t{}), - length(length) {} - - extent_len_t get_length() const final { return length; } + : CachedExtent(CachedExtent::retired_placeholder_t{}, length) {} CachedExtentRef duplicate_for_write(Transaction&) final { ceph_assert(0 == "Should never happen for a placeholder"); @@ -1109,6 +1204,12 @@ public: laddr = nladdr; } + void maybe_set_intermediate_laddr(LBAMapping &mapping) { + laddr = mapping.is_indirect() + ? mapping.get_intermediate_base() + : mapping.get_key(); + } + void apply_delta_and_adjust_crc( paddr_t base, const ceph::bufferlist &bl) final { apply_delta(bl); @@ -1140,6 +1241,8 @@ protected: } private: + // the logical address of the extent, and if shared, + // it is the intermediate_base, see BtreeLBAMapping comments. laddr_t laddr = L_ADDR_NULL; }; diff --git a/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.cc b/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.cc index 9eaeefc72..ed17e2b12 100644 --- a/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.cc +++ b/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.cc @@ -46,7 +46,6 @@ std::ostream &CollectionNode::print_detail_l(std::ostream &out) const CollectionNode::list_ret CollectionNode::list() { - read_to_local(); logger().debug("CollectionNode:{}, {}", __func__, *this); CollectionManager::list_ret_bare list_result; for (auto &[coll, bits] : decoded) { @@ -60,7 +59,6 @@ CollectionNode::list() CollectionNode::create_ret CollectionNode::create(coll_context_t cc, coll_t coll, unsigned bits) { - read_to_local(); logger().debug("CollectionNode:{}", __func__); if (!is_mutable()) { auto mut = cc.tm.get_mutable_extent(cc.t, this)->cast(); @@ -88,8 +86,8 @@ CollectionNode::create(coll_context_t cc, coll_t coll, unsigned bits) CollectionNode::update_ret CollectionNode::update(coll_context_t cc, coll_t coll, unsigned bits) { - read_to_local(); - logger().debug("CollectionNode:{}", __func__); + logger().debug("trans.{} CollectionNode:{} {} {}", + cc.t.get_trans_id(), __func__, coll, bits); if (!is_mutable()) { auto mut = cc.tm.get_mutable_extent(cc.t, this)->cast(); return mut->update(cc, coll, bits); @@ -105,8 +103,8 @@ CollectionNode::update(coll_context_t cc, coll_t coll, unsigned bits) CollectionNode::remove_ret CollectionNode::remove(coll_context_t cc, coll_t coll) { - read_to_local(); - logger().debug("CollectionNode:{}", __func__); + logger().debug("trans.{} CollectionNode:{} {}", + cc.t.get_trans_id(),__func__, coll); if (!is_mutable()) { auto mut = cc.tm.get_mutable_extent(cc.t, this)->cast(); return mut->remove(cc, coll); diff --git a/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.h b/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.h index 1652eb92f..2690fb5fd 100644 --- a/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.h +++ b/ceph/src/crimson/os/seastore/collection_manager/collection_flat_node.h @@ -94,11 +94,11 @@ struct CollectionNode : LogicalCachedExtent { using CollectionNodeRef = TCachedExtentRef; - bool loaded = false; - - template - CollectionNode(T&&... t) - : LogicalCachedExtent(std::forward(t)...) 
{} + explicit CollectionNode(ceph::bufferptr &&ptr) + : LogicalCachedExtent(std::move(ptr)) {} + explicit CollectionNode(const CollectionNode &other) + : LogicalCachedExtent(other), + decoded(other.decoded) {} static constexpr extent_types_t type = extent_types_t::COLL_BLOCK; @@ -134,13 +134,11 @@ struct CollectionNode using update_ret = CollectionManager::update_ret; update_ret update(coll_context_t cc, coll_t coll, unsigned bits); - void read_to_local() { - if (loaded) return; + void on_clean_read() final { bufferlist bl; bl.append(get_bptr()); auto iter = bl.cbegin(); decode((base_coll_map_t&)decoded, iter); - loaded = true; } void copy_to_node() { diff --git a/ceph/src/crimson/os/seastore/collection_manager/flat_collection_manager.cc b/ceph/src/crimson/os/seastore/collection_manager/flat_collection_manager.cc index 15ce920ec..decb095f6 100644 --- a/ceph/src/crimson/os/seastore/collection_manager/flat_collection_manager.cc +++ b/ceph/src/crimson/os/seastore/collection_manager/flat_collection_manager.cc @@ -80,7 +80,6 @@ FlatCollectionManager::create(coll_root_t &coll_root, Transaction &t, coll_root.update(root_extent->get_laddr(), root_extent->get_length()); root_extent->decoded = extent->decoded; - root_extent->loaded = true; return root_extent->create( get_coll_context(t), cid, info.split_bits ).si_then([=, this, &t](auto result) { diff --git a/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.cc b/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.cc index 8d0de4e18..ec41bfab1 100644 --- a/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.cc +++ b/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.cc @@ -36,7 +36,6 @@ CircularBoundedJournal::open_for_mkfs() { return record_submitter.open(true ).safe_then([this](auto ret) { - record_submitter.update_committed_to(get_written_to()); return open_for_mkfs_ret( open_for_mkfs_ertr::ready_future_marker{}, get_written_to()); @@ -48,7 +47,6 @@ CircularBoundedJournal::open_for_mount() { return record_submitter.open(false ).safe_then([this](auto ret) { - record_submitter.update_committed_to(get_written_to()); return open_for_mount_ret( open_for_mount_ertr::ready_future_marker{}, get_written_to()); @@ -111,115 +109,192 @@ CircularBoundedJournal::do_submit_record( }); } +Journal::replay_ret CircularBoundedJournal::replay_segment( + cbj_delta_handler_t &handler, scan_valid_records_cursor& cursor) +{ + LOG_PREFIX(Journal::replay_segment); + return seastar::do_with( + RecordScanner::found_record_handler_t( + [this, &handler, FNAME]( + record_locator_t locator, + const record_group_header_t& r_header, + const bufferlist& mdbuf) + -> RecordScanner::scan_valid_records_ertr::future<> + { + auto maybe_record_deltas_list = try_decode_deltas( + r_header, mdbuf, locator.record_block_base); + if (!maybe_record_deltas_list) { + // This should be impossible, we did check the crc on the mdbuf + ERROR("unable to decode deltas for record {} at {}", + r_header, locator.record_block_base); + return crimson::ct_error::input_output_error::make(); + } + assert(locator.write_result.start_seq != JOURNAL_SEQ_NULL); + auto cursor_addr = convert_paddr_to_abs_addr(locator.write_result.start_seq.offset); + DEBUG("{} at {}", r_header, cursor_addr); + journal_seq_t start_seq = locator.write_result.start_seq; + auto write_result = write_result_t{ + start_seq, + r_header.mdlength + r_header.dlength + }; + auto expected_seq = locator.write_result.start_seq.segment_seq; + cursor_addr += (r_header.mdlength + r_header.dlength); + if 
(cursor_addr >= get_journal_end()) { + cursor_addr = get_records_start(); + ++expected_seq; + paddr_t addr = convert_abs_addr_to_paddr( + cursor_addr, + get_device_id()); + write_result.start_seq.offset = addr; + write_result.start_seq.segment_seq = expected_seq; + } + paddr_t addr = convert_abs_addr_to_paddr( + cursor_addr, + get_device_id()); + set_written_to( + journal_seq_t{expected_seq, addr}); + return seastar::do_with( + std::move(*maybe_record_deltas_list), + [write_result, + &handler, + FNAME](auto& record_deltas_list) { + return crimson::do_for_each( + record_deltas_list, + [write_result, + &handler, FNAME](record_deltas_t& record_deltas) { + auto locator = record_locator_t{ + record_deltas.record_block_base, + write_result + }; + DEBUG("processing {} deltas at block_base {}", + record_deltas.deltas.size(), + locator); + return crimson::do_for_each( + record_deltas.deltas, + [locator, + &handler](auto& p) { + auto& modify_time = p.first; + auto& delta = p.second; + return handler( + locator, + delta, + modify_time).discard_result(); + }); + }); + }); + }), + [=, this, &cursor](auto &dhandler) { + return scan_valid_records( + cursor, + cjs.get_cbj_header().magic, + std::numeric_limits::max(), + dhandler).safe_then([](auto){} + ).handle_error( + replay_ertr::pass_further{}, + crimson::ct_error::assert_all{ + "shouldn't meet with any other error other replay_ertr" + } + ); + } + ); +} + + Journal::replay_ret CircularBoundedJournal::scan_valid_record_delta( - cbj_delta_handler_t &&delta_handler, journal_seq_t tail) + cbj_delta_handler_t &&handler, journal_seq_t tail) { - LOG_PREFIX(CircularBoundedJournal::scan_valid_record_delta); + LOG_PREFIX(Journal::scan_valid_record_delta); + INFO("starting at {} ", tail); return seastar::do_with( + scan_valid_records_cursor(tail), + std::move(handler), bool(false), - rbm_abs_addr(get_rbm_addr(tail)), - std::move(delta_handler), - segment_seq_t(NULL_SEG_SEQ), - [this, FNAME](auto &is_rolled, auto &cursor_addr, auto &d_handler, auto &expected_seq) { - return crimson::repeat( - [this, &is_rolled, &cursor_addr, &d_handler, &expected_seq, FNAME]() mutable - -> replay_ertr::future { - paddr_t record_paddr = convert_abs_addr_to_paddr( - cursor_addr, - get_device_id()); - return read_record(record_paddr, expected_seq - ).safe_then([this, &is_rolled, &cursor_addr, &d_handler, &expected_seq, FNAME](auto ret) - -> replay_ertr::future { - if (!ret.has_value()) { - if (expected_seq == NULL_SEG_SEQ || is_rolled) { - DEBUG("no more records, stop replaying"); - return replay_ertr::make_ready_future< - seastar::stop_iteration>(seastar::stop_iteration::yes); - } else { - cursor_addr = get_records_start(); - ++expected_seq; - is_rolled = true; - return replay_ertr::make_ready_future< - seastar::stop_iteration>(seastar::stop_iteration::no); - } - } - auto [r_header, bl] = *ret; - bufferlist mdbuf; - mdbuf.substr_of(bl, 0, r_header.mdlength); - paddr_t record_block_base = paddr_t::make_blk_paddr( - get_device_id(), cursor_addr + r_header.mdlength); - auto maybe_record_deltas_list = try_decode_deltas( - r_header, mdbuf, record_block_base); - if (!maybe_record_deltas_list) { - // This should be impossible, we did check the crc on the mdbuf - ERROR("unable to decode deltas for record {} at {}", - r_header, record_block_base); - return crimson::ct_error::input_output_error::make(); - } - DEBUG("{} at {}", r_header, cursor_addr); - auto write_result = write_result_t{ - r_header.committed_to, - bl.length() - }; - if (expected_seq == NULL_SEG_SEQ) { - expected_seq = 
r_header.committed_to.segment_seq; - } else { - assert(expected_seq == r_header.committed_to.segment_seq); - } - cursor_addr += bl.length(); - if (cursor_addr >= get_journal_end()) { - assert(cursor_addr == get_journal_end()); - cursor_addr = get_records_start(); - ++expected_seq; - paddr_t addr = convert_abs_addr_to_paddr( - cursor_addr, - get_device_id()); - write_result.start_seq.offset = addr; - write_result.start_seq.segment_seq = expected_seq; - is_rolled = true; - } - paddr_t addr = convert_abs_addr_to_paddr( - cursor_addr, - get_device_id()); - set_written_to( - journal_seq_t{expected_seq, addr}); - return seastar::do_with( - std::move(*maybe_record_deltas_list), - [write_result, - &d_handler, - FNAME](auto& record_deltas_list) { - return crimson::do_for_each( - record_deltas_list, - [write_result, - &d_handler, FNAME](record_deltas_t& record_deltas) { - auto locator = record_locator_t{ - record_deltas.record_block_base, - write_result - }; - DEBUG("processing {} deltas at block_base {}", - record_deltas.deltas.size(), - locator); - return crimson::do_for_each( - record_deltas.deltas, - [locator, - &d_handler](auto& p) { - auto& modify_time = p.first; - auto& delta = p.second; - return d_handler( - locator, - delta, - modify_time).discard_result(); - }); - }).safe_then([]() { - return replay_ertr::make_ready_future< - seastar::stop_iteration>(seastar::stop_iteration::no); - }); - }); + [this] (auto &cursor, auto &handler, auto &rolled) { + return crimson::repeat([this, &handler, &cursor, &rolled]() + -> replay_ertr::future + { + return replay_segment(handler, cursor + ).safe_then([this, &cursor, &rolled] { + if (!rolled) { + cursor.last_valid_header_found = false; + } + if (!cursor.is_complete()) { + try_read_rolled_header(cursor); + rolled = true; + return replay_ertr::make_ready_future< + seastar::stop_iteration>(seastar::stop_iteration::no); + } + return replay_ertr::make_ready_future< + seastar::stop_iteration>(seastar::stop_iteration::yes); }); }); }); } +RecordScanner::read_ret CircularBoundedJournal::read(paddr_t start, size_t len) +{ + LOG_PREFIX(CircularBoundedJournal::read); + rbm_abs_addr addr = convert_paddr_to_abs_addr(start); + DEBUG("reading data from addr {} read length {}", addr, len); + auto bptr = bufferptr(ceph::buffer::create_page_aligned(len)); + return cjs.read(addr, bptr + ).safe_then([bptr=std::move(bptr)]() { + return read_ret( + RecordScanner::read_ertr::ready_future_marker{}, + std::move(bptr) + ); + }); +} + +bool CircularBoundedJournal::is_record_segment_seq_invalid( + scan_valid_records_cursor &cursor, + record_group_header_t &r_header) +{ + LOG_PREFIX(CircularBoundedJournal::is_record_segment_seq_invalid); + auto print_invalid = [FNAME](auto &r_header) { + DEBUG("invalid header: {}", r_header); + return true; + }; + if (cursor.seq.offset == convert_abs_addr_to_paddr( + get_records_start(), get_device_id())) { + if ((r_header.committed_to.segment_seq == NULL_SEG_SEQ && + cursor.seq.segment_seq != 0) || + r_header.committed_to.segment_seq != cursor.seq.segment_seq - 1) { + return print_invalid(r_header); + } + } else if (r_header.committed_to.segment_seq != cursor.seq.segment_seq) { + /* + * Assuing that seastore issues several records using submit_recods() + * as shown in the following example. + * + * Example ) + * a. submit_record(a); + * b. submit_record(b); + * c. submit_record(c); + * d. roll to begin + * e. submit_record(d); + * f. submit_record(e); + * g. submit_record(f); + * + * In this example, we need to consider the two cases. 
+ * case 1) + * records a - e were issued in a batch manner + * case 2) + * When starts to submit_record(e) at step 6, submit(b) has completed its finalize phase, + * so the header of e's committed_to points to the end of b. + * + * To handle these cases correctly, the following condition is added. + */ + if ((r_header.committed_to.offset >= cursor.last_committed.offset && + r_header.committed_to.segment_seq == cursor.last_committed.segment_seq) && + r_header.committed_to.segment_seq == cursor.seq.segment_seq - 1) { + return false; + } + return print_invalid(r_header); + } + return false; +} + Journal::replay_ret CircularBoundedJournal::replay( delta_handler_t &&delta_handler) { @@ -286,7 +361,13 @@ Journal::replay_ret CircularBoundedJournal::replay( return scan_valid_record_delta(std::move(call_d_handler_if_valid), tail); }); }).safe_then([this]() { - record_submitter.update_committed_to(get_written_to()); + // make sure that committed_to is JOURNAL_SEQ_NULL if jounal is the initial state + if (get_written_to() != + journal_seq_t{0, + convert_abs_addr_to_paddr(get_records_start(), + get_device_id())}) { + record_submitter.update_committed_to(get_written_to()); + } trimmer.update_journal_tails( get_dirty_tail(), get_alloc_tail()); @@ -294,81 +375,6 @@ Journal::replay_ret CircularBoundedJournal::replay( }); } -CircularBoundedJournal::read_record_ret -CircularBoundedJournal::return_record(record_group_header_t& header, bufferlist bl) -{ - LOG_PREFIX(CircularBoundedJournal::return_record); - DEBUG("record size {}", bl.length()); - assert(bl.length() == header.mdlength + header.dlength); - bufferlist md_bl, data_bl; - md_bl.substr_of(bl, 0, header.mdlength); - data_bl.substr_of(bl, header.mdlength, header.dlength); - if (validate_records_metadata(md_bl) && - validate_records_data(header, data_bl)) { - return read_record_ret( - read_record_ertr::ready_future_marker{}, - std::make_pair(header, std::move(bl))); - } else { - DEBUG("invalid matadata"); - return read_record_ret( - read_record_ertr::ready_future_marker{}, - std::nullopt); - } -} - -CircularBoundedJournal::read_record_ret -CircularBoundedJournal::read_record(paddr_t off, segment_seq_t expected_seq) -{ - LOG_PREFIX(CircularBoundedJournal::read_record); - rbm_abs_addr addr = convert_paddr_to_abs_addr(off); - auto read_length = get_block_size(); - assert(addr + read_length <= get_journal_end()); - DEBUG("reading record from abs addr {} read length {}", addr, read_length); - auto bptr = bufferptr(ceph::buffer::create_page_aligned(read_length)); - return cjs.read(addr, bptr - ).safe_then([this, addr, bptr, expected_seq, FNAME]() mutable - -> read_record_ret { - record_group_header_t h; - bufferlist bl; - bl.append(bptr); - auto bp = bl.cbegin(); - try { - decode(h, bp); - } catch (ceph::buffer::error &e) { - return read_record_ret( - read_record_ertr::ready_future_marker{}, - std::nullopt); - } - if (h.mdlength < get_block_size() || - h.mdlength % get_block_size() != 0 || - h.dlength % get_block_size() != 0 || - addr + h.mdlength + h.dlength > get_journal_end() || - h.committed_to.segment_seq == NULL_SEG_SEQ || - (expected_seq != NULL_SEG_SEQ && - h.committed_to.segment_seq != expected_seq)) { - return read_record_ret( - read_record_ertr::ready_future_marker{}, - std::nullopt); - } - auto record_size = h.mdlength + h.dlength; - if (record_size > get_block_size()) { - auto next_addr = addr + get_block_size(); - auto next_length = record_size - get_block_size(); - auto next_bptr = bufferptr(ceph::buffer::create_page_aligned(next_length)); - 
DEBUG("reading record part 2 from abs addr {} read length {}", - next_addr, next_length); - return cjs.read(next_addr, next_bptr - ).safe_then([this, h, next_bptr=std::move(next_bptr), bl=std::move(bl)]() mutable { - bl.append(next_bptr); - return return_record(h, bl); - }); - } else { - assert(record_size == get_block_size()); - return return_record(h, bl); - } - }); -} - seastar::future<> CircularBoundedJournal::finish_commit(transaction_type_t type) { if (is_trim_transaction(type)) { return update_journal_tail( diff --git a/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.h b/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.h index bb3e2a860..debe535ae 100644 --- a/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.h +++ b/ceph/src/crimson/os/seastore/journal/circular_bounded_journal.h @@ -21,6 +21,7 @@ #include #include "crimson/os/seastore/journal/record_submitter.h" #include "crimson/os/seastore/journal/circular_journal_space.h" +#include "crimson/os/seastore/record_scanner.h" namespace crimson::os::seastore::journal { @@ -55,7 +56,7 @@ using RBMDevice = random_block_device::RBMDevice; constexpr uint64_t DEFAULT_BLOCK_SIZE = 4096; -class CircularBoundedJournal : public Journal { +class CircularBoundedJournal : public Journal, RecordScanner { public: CircularBoundedJournal( JournalTrimmer &trimmer, RBMDevice* device, const std::string &path); @@ -116,28 +117,6 @@ public: return cjs.get_alloc_tail(); } - using read_ertr = crimson::errorator< - crimson::ct_error::input_output_error, - crimson::ct_error::invarg, - crimson::ct_error::enoent, - crimson::ct_error::erange>; - using read_record_ertr = read_ertr; - using read_record_ret = read_record_ertr::future< - std::optional> - >; - /* - * read_record - * - * read record from given address - * - * @param paddr_t to read - * @param expected_seq - * - */ - read_record_ret read_record(paddr_t offset, segment_seq_t expected_seq); - - read_record_ret return_record(record_group_header_t& header, bufferlist bl); - void set_write_pipeline(WritePipeline *_write_pipeline) final { write_pipeline = _write_pipeline; } @@ -179,12 +158,48 @@ public: submit_record_ret do_submit_record(record_t &&record, OrderingHandle &handle); + void try_read_rolled_header(scan_valid_records_cursor &cursor) { + paddr_t addr = convert_abs_addr_to_paddr( + get_records_start(), + get_device_id()); + cursor.seq.offset = addr; + cursor.seq.segment_seq += 1; + } + + void initialize_cursor(scan_valid_records_cursor& cursor) final { + cursor.block_size = get_block_size(); + }; + + Journal::replay_ret replay_segment( + cbj_delta_handler_t &handler, scan_valid_records_cursor& cursor); + + read_ret read(paddr_t start, size_t len) final; + + bool is_record_segment_seq_invalid(scan_valid_records_cursor &cursor, + record_group_header_t &h) final; + + int64_t get_segment_end_offset(paddr_t addr) final { + return get_journal_end(); + } + // Test interfaces CircularJournalSpace& get_cjs() { return cjs; } + read_validate_record_metadata_ret test_read_validate_record_metadata( + scan_valid_records_cursor &cursor, + segment_nonce_t nonce) + { + return read_validate_record_metadata(cursor, nonce); + } + + void test_initialize_cursor(scan_valid_records_cursor &cursor) + { + initialize_cursor(cursor); + } + private: JournalTrimmer &trimmer; std::string path; diff --git a/ceph/src/crimson/os/seastore/journal/circular_journal_space.cc b/ceph/src/crimson/os/seastore/journal/circular_journal_space.cc index 7565c2815..123bb9135 100644 --- 
a/ceph/src/crimson/os/seastore/journal/circular_journal_space.cc +++ b/ceph/src/crimson/os/seastore/journal/circular_journal_space.cc @@ -18,8 +18,9 @@ std::ostream &operator<<(std::ostream &out, const CircularJournalSpace::cbj_header_t &header) { return out << "cbj_header_t(" - << ", dirty_tail=" << header.dirty_tail + << "dirty_tail=" << header.dirty_tail << ", alloc_tail=" << header.alloc_tail + << ", magic=" << header.magic << ")"; } @@ -41,8 +42,10 @@ CircularJournalSpace::roll_ertr::future<> CircularJournalSpace::roll() { get_records_start(), get_device_id()); auto seq = get_written_to(); + seq.segment_seq++; + assert(seq.segment_seq < MAX_SEG_SEQ); set_written_to( - journal_seq_t{++seq.segment_seq, paddr}); + journal_seq_t{seq.segment_seq, paddr}); return roll_ertr::now(); } @@ -86,6 +89,15 @@ CircularJournalSpace::write(ceph::bufferlist&& to_write) { ); } +segment_nonce_t calc_new_nonce( + uint32_t crc, + unsigned char const *data, + unsigned length) +{ + crc &= std::numeric_limits::max() >> 1; + return ceph_crc32c(crc, data, length); +} + CircularJournalSpace::open_ret CircularJournalSpace::open(bool is_mkfs) { std::ostringstream oss; oss << device_id_printer_t{get_device_id()}; @@ -103,13 +115,18 @@ CircularJournalSpace::open_ret CircularJournalSpace::open(bool is_mkfs) { get_records_start(), device->get_device_id())}; head.alloc_tail = head.dirty_tail; + auto meta = device->get_meta(); + head.magic = calc_new_nonce( + std::rand() % std::numeric_limits::max(), + reinterpret_cast(meta.seastore_id.bytes()), + sizeof(meta.seastore_id.uuid)); encode(head, bl); header = head; set_written_to(head.dirty_tail); initialized = true; DEBUG( - "initialize header block in CircularJournalSpace length {}", - bl.length()); + "initialize header block in CircularJournalSpace length {}, head: {}", + bl.length(), header); return write_header( ).safe_then([this]() { return open_ret( @@ -174,8 +191,8 @@ CircularJournalSpace::read_header() assert(device); auto bptr = bufferptr(ceph::buffer::create_page_aligned( device->get_block_size())); - DEBUG("reading {}", device->get_journal_start()); - return device->read(device->get_journal_start(), bptr + DEBUG("reading {}", device->get_shard_journal_start()); + return device->read(device->get_shard_journal_start(), bptr ).safe_then([bptr, FNAME]() mutable -> read_header_ret { bufferlist bl; @@ -222,7 +239,7 @@ CircularJournalSpace::write_header() assert(bl.length() < get_block_size()); bufferptr bp = bufferptr(ceph::buffer::create_page_aligned(get_block_size())); iter.copy(bl.length(), bp.c_str()); - return device->write(device->get_journal_start(), std::move(bp) + return device->write(device->get_shard_journal_start(), std::move(bp) ).handle_error( write_ertr::pass_further{}, crimson::ct_error::assert_all{ "Invalid error device->write" } diff --git a/ceph/src/crimson/os/seastore/journal/circular_journal_space.h b/ceph/src/crimson/os/seastore/journal/circular_journal_space.h index 1e97f4efe..c88b65ad5 100644 --- a/ceph/src/crimson/os/seastore/journal/circular_journal_space.h +++ b/ceph/src/crimson/os/seastore/journal/circular_journal_space.h @@ -39,7 +39,7 @@ class CircularJournalSpace : public JournalAllocator { } segment_nonce_t get_nonce() const final { - return 0; + return header.magic; } bool needs_roll(std::size_t length) const final; @@ -117,11 +117,13 @@ class CircularJournalSpace : public JournalAllocator { // start offset of CircularBoundedJournal in the device journal_seq_t dirty_tail; journal_seq_t alloc_tail; + segment_nonce_t magic; 
DENC(cbj_header_t, v, p) { DENC_START(1, 1, p); denc(v.dirty_tail, p); denc(v.alloc_tail, p); + denc(v.magic, p); DENC_FINISH(p); } }; @@ -188,7 +190,7 @@ class CircularJournalSpace : public JournalAllocator { } rbm_abs_addr get_records_start() const { assert(device); - return device->get_journal_start() + get_block_size(); + return device->get_shard_journal_start() + get_block_size(); } size_t get_records_available_size() const { return get_records_total_size() - get_records_used_size(); @@ -206,7 +208,7 @@ class CircularJournalSpace : public JournalAllocator { } rbm_abs_addr get_journal_end() const { assert(device); - return device->get_journal_start() + device->get_journal_size(); + return device->get_shard_journal_start() + device->get_journal_size(); } read_ertr::future<> read( diff --git a/ceph/src/crimson/os/seastore/lba_manager.h b/ceph/src/crimson/os/seastore/lba_manager.h index f36a78834..d7adf2304 100644 --- a/ceph/src/crimson/os/seastore/lba_manager.h +++ b/ceph/src/crimson/os/seastore/lba_manager.h @@ -39,6 +39,8 @@ public: * Fetches mappings for laddr_t in range [offset, offset + len) * * Future will not resolve until all pins have resolved (set_paddr called) + * For indirect lba mappings, get_mappings will always retrieve the original + * lba value. */ using get_mappings_iertr = base_iertr; using get_mappings_ret = get_mappings_iertr::future; @@ -50,6 +52,8 @@ public: * Fetches mappings for a list of laddr_t in range [offset, offset + len) * * Future will not resolve until all pins have resolved (set_paddr called) + * For indirect lba mappings, get_mappings will always retrieve the original + * lba value. */ virtual get_mappings_ret get_mappings( Transaction &t, @@ -59,6 +63,8 @@ public: * Fetches the mapping for laddr_t * * Future will not resolve until the pin has resolved (set_paddr called) + * For indirect lba mappings, get_mapping will always retrieve the original + * lba value. */ using get_mapping_iertr = base_iertr::extend< crimson::ct_error::enoent>; @@ -81,11 +87,24 @@ public: laddr_t hint, extent_len_t len, paddr_t addr, - LogicalCachedExtent *nextent) = 0; + LogicalCachedExtent &nextent) = 0; + + virtual alloc_extent_ret clone_extent( + Transaction &t, + laddr_t hint, + extent_len_t len, + laddr_t intermediate_key, + paddr_t actual_addr, + laddr_t intermediate_base) = 0; + + virtual alloc_extent_ret reserve_region( + Transaction &t, + laddr_t hint, + extent_len_t len) = 0; struct ref_update_result_t { unsigned refcount = 0; - paddr_t addr; + pladdr_t addr; extent_len_t length = 0; }; using ref_iertr = base_iertr::extend< @@ -99,7 +118,8 @@ public: */ virtual ref_ret decref_extent( Transaction &t, - laddr_t addr) = 0; + laddr_t addr, + bool cascade_remove) = 0; /** * Increments ref count on extent @@ -110,6 +130,16 @@ public: Transaction &t, laddr_t addr) = 0; + /** + * Increments ref count on extent + * + * @return returns resulting refcount + */ + virtual ref_ret incref_extent( + Transaction &t, + laddr_t addr, + int delta) = 0; + /** * Should be called after replay on each cached extent. 
* Implementation must initialize the LBAMapping on any diff --git a/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc b/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc index 296af756b..a607cd612 100644 --- a/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc +++ b/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc @@ -64,14 +64,14 @@ const get_phy_tree_root_node_ret get_phy_tree_root_node< } else { return {false, trans_intr::make_interruptible( - seastar::make_ready_future< - CachedExtentRef>(CachedExtentRef()))}; + Cache::get_extent_ertr::make_ready_future< + CachedExtentRef>())}; } } else { return {false, trans_intr::make_interruptible( - seastar::make_ready_future< - CachedExtentRef>(CachedExtentRef()))}; + Cache::get_extent_ertr::make_ready_future< + CachedExtentRef>())}; } } @@ -127,29 +127,94 @@ BtreeLBAManager::get_mappings( return with_btree_state( cache, c, - [c, offset, length, FNAME](auto &btree, auto &ret) { - return LBABtree::iterate_repeat( - c, - btree.upper_bound_right(c, offset), - [&ret, offset, length, c, FNAME](auto &pos) { - if (pos.is_end() || pos.get_key() >= (offset + length)) { - TRACET("{}~{} done with {} results", - c.trans, offset, length, ret.size()); - return typename LBABtree::iterate_repeat_ret_inner( + [c, offset, length, FNAME, this](auto &btree, auto &ret) { + return seastar::do_with( + std::list(), + [offset, length, c, FNAME, this, &ret, &btree](auto &pin_list) { + return LBABtree::iterate_repeat( + c, + btree.upper_bound_right(c, offset), + [&pin_list, offset, length, c, FNAME](auto &pos) { + if (pos.is_end() || pos.get_key() >= (offset + length)) { + TRACET("{}~{} done with {} results", + c.trans, offset, length, pin_list.size()); + return LBABtree::iterate_repeat_ret_inner( + interruptible::ready_future_marker{}, + seastar::stop_iteration::yes); + } + TRACET("{}~{} got {}, {}, repeat ...", + c.trans, offset, length, pos.get_key(), pos.get_val()); + ceph_assert((pos.get_key() + pos.get_val().len) > offset); + pin_list.push_back(pos.get_pin(c)); + return LBABtree::iterate_repeat_ret_inner( interruptible::ready_future_marker{}, - seastar::stop_iteration::yes); - } - TRACET("{}~{} got {}, {}, repeat ...", - c.trans, offset, length, pos.get_key(), pos.get_val()); - ceph_assert((pos.get_key() + pos.get_val().len) > offset); - ret.push_back(pos.get_pin(c)); - return typename LBABtree::iterate_repeat_ret_inner( - interruptible::ready_future_marker{}, - seastar::stop_iteration::no); + seastar::stop_iteration::no); + }).si_then([this, &ret, c, &pin_list] { + return _get_original_mappings(c, pin_list + ).si_then([&ret](auto _ret) { + ret = std::move(_ret); + }); + }); }); }); } +BtreeLBAManager::_get_original_mappings_ret +BtreeLBAManager::_get_original_mappings( + op_context_t c, + std::list &pin_list) +{ + return seastar::do_with( + lba_pin_list_t(), + [this, c, &pin_list](auto &ret) { + return trans_intr::do_for_each( + pin_list, + [this, c, &ret](auto &pin) { + LOG_PREFIX(BtreeLBAManager::get_mappings); + if (pin->get_raw_val().is_paddr()) { + ret.emplace_back(std::move(pin)); + return get_mappings_iertr::now(); + } + TRACET( + "getting original mapping for indirect mapping {}~{}", + c.trans, pin->get_key(), pin->get_length()); + return this->get_mappings( + c.trans, pin->get_raw_val().get_laddr(), pin->get_length() + ).si_then([&pin, &ret, c](auto new_pin_list) { + LOG_PREFIX(BtreeLBAManager::get_mappings); + assert(new_pin_list.size() == 1); + auto &new_pin = new_pin_list.front(); + 
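// A minimal sketch of the paddr-or-laddr ("pladdr") dispatch used throughout
// this change, with std::variant and simple stand-ins for paddr_t/laddr_t; the
// real pladdr_t is assumed to expose the same is_paddr()/get_paddr()/get_laddr()
// style of interface. Direct mappings carry a physical address, indirect ones
// carry the logical address of the mapping they point at (as checked above via
// pin->get_raw_val().is_paddr()).
#include <cassert>
#include <cstdint>
#include <iostream>
#include <variant>

struct paddr_t { uint64_t offset; };  // stand-in: physical (disk) address
struct laddr_t { uint64_t offset; };  // stand-in: logical address

class pladdr_t {
  std::variant<paddr_t, laddr_t> v;
public:
  pladdr_t(paddr_t p) : v(p) {}
  pladdr_t(laddr_t l) : v(l) {}
  bool is_paddr() const { return std::holds_alternative<paddr_t>(v); }
  bool is_laddr() const { return std::holds_alternative<laddr_t>(v); }
  paddr_t get_paddr() const { assert(is_paddr()); return std::get<paddr_t>(v); }
  laddr_t get_laddr() const { assert(is_laddr()); return std::get<laddr_t>(v); }
};

void describe(const pladdr_t& a) {
  if (a.is_paddr()) {
    std::cout << "direct mapping -> paddr 0x" << std::hex << a.get_paddr().offset << "\n";
  } else {
    std::cout << "indirect mapping -> laddr 0x" << std::hex << a.get_laddr().offset << "\n";
  }
}

int main() {
  describe(pladdr_t{paddr_t{0x1000}});
  describe(pladdr_t{laddr_t{0x2000}});
}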
auto intermediate_key = pin->get_raw_val().get_laddr(); + assert(!new_pin->is_indirect()); + assert(new_pin->get_key() <= intermediate_key); + assert(new_pin->get_key() + new_pin->get_length() >= + intermediate_key + pin->get_length()); + + TRACET("Got mapping {}~{} for indirect mapping {}~{}, " + "intermediate_key {}", + c.trans, + new_pin->get_key(), new_pin->get_length(), + pin->get_key(), pin->get_length(), + pin->get_raw_val().get_laddr()); + auto &btree_new_pin = static_cast(*new_pin); + btree_new_pin.set_key_for_indirect( + pin->get_key(), + pin->get_length(), + pin->get_raw_val().get_laddr()); + ret.emplace_back(std::move(new_pin)); + return seastar::now(); + }).handle_error_interruptible( + crimson::ct_error::input_output_error::pass_further{}, + crimson::ct_error::assert_all("unexpected enoent") + ); + } + ).si_then([&ret] { + return std::move(ret); + }); + }); +} + + BtreeLBAManager::get_mappings_ret BtreeLBAManager::get_mappings( Transaction &t, @@ -181,14 +246,27 @@ BtreeLBAManager::get_mapping( { LOG_PREFIX(BtreeLBAManager::get_mapping); TRACET("{}", t, offset); + return _get_mapping(t, offset + ).si_then([](auto pin) { + return get_mapping_iertr::make_ready_future(std::move(pin)); + }); +} + +BtreeLBAManager::_get_mapping_ret +BtreeLBAManager::_get_mapping( + Transaction &t, + laddr_t offset) +{ + LOG_PREFIX(BtreeLBAManager::_get_mapping); + TRACET("{}", t, offset); auto c = get_context(t); - return with_btree_ret( + return with_btree_ret( cache, c, - [FNAME, c, offset](auto &btree) { + [FNAME, c, offset, this](auto &btree) { return btree.lower_bound( c, offset - ).si_then([FNAME, offset, c](auto iter) -> get_mapping_ret { + ).si_then([FNAME, offset, c](auto iter) -> _get_mapping_ret { if (iter.is_end() || iter.get_key() != offset) { ERRORT("laddr={} doesn't exist", c.trans, offset); return crimson::ct_error::enoent::make(); @@ -196,20 +274,40 @@ BtreeLBAManager::get_mapping( TRACET("{} got {}, {}", c.trans, offset, iter.get_key(), iter.get_val()); auto e = iter.get_pin(c); - return get_mapping_ret( + return _get_mapping_ret( interruptible::ready_future_marker{}, std::move(e)); } + }).si_then([this, c](auto pin) -> _get_mapping_ret { + if (pin->get_raw_val().is_laddr()) { + return seastar::do_with( + std::move(pin), + [this, c](auto &pin) { + return _get_mapping( + c.trans, pin->get_raw_val().get_laddr() + ).si_then([&pin](auto new_pin) { + ceph_assert(pin->get_length() == new_pin->get_length()); + new_pin->set_key_for_indirect( + pin->get_key(), + pin->get_length()); + return new_pin; + }); + }); + } else { + return get_mapping_iertr::make_ready_future(std::move(pin)); + } }); }); } BtreeLBAManager::alloc_extent_ret -BtreeLBAManager::alloc_extent( +BtreeLBAManager::_alloc_extent( Transaction &t, laddr_t hint, extent_len_t len, - paddr_t addr, + pladdr_t addr, + paddr_t actual_addr, + laddr_t intermediate_base, LogicalCachedExtent* nextent) { struct state_t { @@ -221,7 +319,7 @@ BtreeLBAManager::alloc_extent( state_t(laddr_t hint) : last_end(hint) {} }; - LOG_PREFIX(BtreeLBAManager::alloc_extent); + LOG_PREFIX(BtreeLBAManager::_alloc_extent); TRACET("{}~{}, hint={}", t, addr, len, hint); auto c = get_context(t); ++stats.num_alloc_extents; @@ -272,21 +370,31 @@ BtreeLBAManager::alloc_extent( c, *state.insert_iter, state.last_end, - lba_map_val_t{len, addr, 1, 0}, + lba_map_val_t{len, pladdr_t(addr), 1, 0}, nextent ).si_then([&state, FNAME, c, addr, len, hint, nextent](auto &&p) { auto [iter, inserted] = std::move(p); TRACET("{}~{}, hint={}, inserted at {}", c.trans, addr, 
len, hint, state.last_end); if (nextent) { + ceph_assert(addr.is_paddr()); nextent->set_laddr(iter.get_key()); } ceph_assert(inserted); state.ret = iter; }); }); - }).si_then([c](auto &&state) { - return state.ret->get_pin(c); + }).si_then([c, actual_addr, addr, intermediate_base](auto &&state) { + auto ret_pin = state.ret->get_pin(c); + if (actual_addr != P_ADDR_NULL) { + ceph_assert(addr.is_laddr()); + ret_pin->set_paddr(actual_addr); + ret_pin->set_intermediate_base(intermediate_base); + } else { + ceph_assert(addr.is_paddr()); + } + return alloc_extent_iertr::make_ready_future( + std::move(ret_pin)); }); } @@ -311,7 +419,8 @@ _init_cached_extent( LOG_PREFIX(BtreeLBAManager::init_cached_extent); if (!iter.is_end() && iter.get_key() == logn->get_laddr() && - iter.get_val().paddr == logn->get_paddr()) { + iter.get_val().pladdr.is_paddr() && + iter.get_val().pladdr.get_paddr() == logn->get_paddr()) { assert(!iter.get_leaf_node()->is_pending()); iter.get_leaf_node()->link_child(logn.get(), iter.get_leaf_pos()); logn->set_laddr(iter.get_pin(c)->get_key()); @@ -387,8 +496,8 @@ BtreeLBAManager::scan_mappings( seastar::stop_iteration::yes); } ceph_assert((pos.get_key() + pos.get_val().len) > begin); - f(pos.get_key(), pos.get_val().paddr, pos.get_val().len); - return typename LBABtree::iterate_repeat_ret_inner( + f(pos.get_key(), pos.get_val().pladdr.get_paddr(), pos.get_val().len); + return LBABtree::iterate_repeat_ret_inner( interruptible::ready_future_marker{}, seastar::stop_iteration::no); }); @@ -439,8 +548,9 @@ BtreeLBAManager::update_mapping( const lba_map_val_t &in) { assert(!addr.is_null()); lba_map_val_t ret = in; - ceph_assert(in.paddr == prev_addr); - ret.paddr = addr; + ceph_assert(in.pladdr.is_paddr()); + ceph_assert(in.pladdr.get_paddr() == prev_addr); + ret.pladdr = addr; return ret; }, nextent @@ -506,11 +616,62 @@ void BtreeLBAManager::register_metrics() ); } +BtreeLBAManager::ref_iertr::future>> +BtreeLBAManager::_decref_intermediate( + Transaction &t, + laddr_t addr, + extent_len_t len) +{ + auto c = get_context(t); + return with_btree( + cache, + c, + [c, addr, len](auto &btree) mutable { + return btree.upper_bound_right( + c, addr + ).si_then([&btree, addr, len, c](auto iter) { + return seastar::do_with( + std::move(iter), + [&btree, addr, len, c](auto &iter) { + ceph_assert(!iter.is_end()); + ceph_assert(iter.get_key() <= addr); + auto val = iter.get_val(); + ceph_assert(iter.get_key() + val.len >= addr + len); + ceph_assert(val.pladdr.is_paddr()); + ceph_assert(val.refcount >= 1); + val.refcount -= 1; + + LOG_PREFIX(BtreeLBAManager::_decref_intermediate); + TRACET("decreased refcount of intermediate key {} -- {}", + c.trans, + iter.get_key(), + val); + + if (!val.refcount) { + return btree.remove(c, iter + ).si_then([val] { + return std::make_optional< + std::pair>( + val.pladdr.get_paddr(), val.len); + }); + } else { + return btree.update(c, iter, val, nullptr + ).si_then([](auto) { + return seastar::make_ready_future< + std::optional>>(std::nullopt); + }); + } + }); + }); + }); +} + BtreeLBAManager::update_refcount_ret BtreeLBAManager::update_refcount( Transaction &t, laddr_t addr, - int delta) + int delta, + bool cascade_remove) { LOG_PREFIX(BtreeLBAManager::update_refcount); TRACET("laddr={}, delta={}", t, addr, delta); @@ -524,13 +685,32 @@ BtreeLBAManager::update_refcount( return out; }, nullptr - ).si_then([&t, addr, delta, FNAME](auto result) { + ).si_then([&t, addr, delta, FNAME, this, cascade_remove](auto result) { DEBUGT("laddr={}, delta={} done -- {}", t, 
addr, delta, result); - return ref_update_result_t{ - result.refcount, - result.paddr, - result.len - }; + auto fut = ref_iertr::make_ready_future< + std::optional>>(); + if (!result.refcount && result.pladdr.is_laddr() && cascade_remove) { + fut = _decref_intermediate( + t, + result.pladdr.get_laddr(), + result.len + ); + } + return fut.si_then([result](auto removed) { + if (result.pladdr.is_laddr() + && removed) { + return ref_update_result_t{ + result.refcount, + removed->first, + removed->second}; + } else { + return ref_update_result_t{ + result.refcount, + result.pladdr, + result.len + }; + } + }); }); } diff --git a/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.h b/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.h index b48abf945..892600ed0 100644 --- a/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.h +++ b/ceph/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.h @@ -26,6 +26,36 @@ namespace crimson::os::seastore::lba_manager::btree { class BtreeLBAMapping : public BtreeNodeMapping { +// To support cloning, there are two kinds of lba mappings: +// 1. physical lba mapping: the pladdr in the value of which is the paddr of +// the corresponding extent; +// 2. indirect lba mapping: the pladdr in the value of which is an laddr pointing +// to the physical lba mapping that points to the actual paddr of the +// extent being searched; +// +// Accordingly, BtreeLBAMapping may also work under two modes: indirect or direct +// 1. BtreeLBAMappings that come from querying an indirect lba mapping in the lba tree +// are indirect; +// 2. BtreeLBAMappings that come from querying a physical lba mapping in the lba tree +// are direct. +// +// For direct BtreeLBAMappings, there are two important fields: +// 1. key: the laddr of the lba mapping being queried; +// 2. paddr: the paddr recorded in the value of the lba mapping being queried. +// For indirect BtreeLBAMappings, BtreeLBAMapping has five important fields: +// 1. key: the laddr key of the lba entry being queried; +// 2. intermediate_key: the laddr within the scope of the physical lba mapping +// that the current indirect lba mapping points to; although an indirect mapping +// points to the start of the physical lba mapping, it may change to another +// laddr after remap +// 3. intermediate_base: the laddr key of the physical lba mapping, intermediate_key +// and intermediate_base should be the same when doing cloning +// 4. intermediate_offset: intermediate_key - intermediate_base +// 5. paddr: the paddr recorded in the physical lba mapping pointed to by the +// indirect lba mapping being queried; +// +// NOTE THAT, for direct BtreeLBAMappings, their intermediate_keys are the same as +// their keys. public: BtreeLBAMapping(op_context_t ctx) : BtreeNodeMapping(ctx) {} @@ -34,17 +64,112 @@ public: CachedExtentRef parent, uint16_t pos, lba_map_val_t &val, - lba_node_meta_t &&meta) + lba_node_meta_t meta) : BtreeNodeMapping( c, parent, pos, - val.paddr, + val.pladdr.is_paddr() ? val.pladdr.get_paddr() : P_ADDR_NULL, val.len, - std::forward(meta)) + meta), + key(meta.begin), + indirect(val.pladdr.is_laddr() ? true : false), + intermediate_key(indirect ? val.pladdr.get_laddr() : L_ADDR_NULL), + intermediate_length(indirect ? 
val.len : 0), + raw_val(val.pladdr), + map_val(val) {} + + lba_map_val_t get_map_val() const { + return map_val; + } + + bool is_indirect() const final { + return indirect; + } + + void set_key_for_indirect( + laddr_t new_key, + extent_len_t length, + laddr_t interkey = L_ADDR_NULL) + { + turn_indirect(interkey); + key = new_key; + intermediate_length = len; + len = length; + } + + laddr_t get_key() const final { + return key; + } + + pladdr_t get_raw_val() const { + return raw_val; + } + + void set_paddr(paddr_t addr) { + value = addr; + } + + laddr_t get_intermediate_key() const final { + assert(is_indirect()); + assert(intermediate_key != L_ADDR_NULL); + return intermediate_key; + } + + laddr_t get_intermediate_base() const final { + assert(is_indirect()); + assert(intermediate_base != L_ADDR_NULL); + return intermediate_base; + } + + extent_len_t get_intermediate_offset() const final { + assert(intermediate_key >= intermediate_base); + assert((intermediate_key == L_ADDR_NULL) + == (intermediate_base == L_ADDR_NULL)); + return intermediate_key - intermediate_base; + } + + extent_len_t get_intermediate_length() const final { + assert(is_indirect()); + assert(intermediate_length); + return intermediate_length; + } + + void set_intermediate_base(laddr_t base) { + intermediate_base = base; + } + +protected: + std::unique_ptr> _duplicate( + op_context_t ctx) const final { + auto pin = std::unique_ptr(new BtreeLBAMapping(ctx)); + pin->key = key; + pin->intermediate_base = intermediate_base; + pin->intermediate_key = intermediate_key; + pin->indirect = indirect; + pin->raw_val = raw_val; + pin->map_val = map_val; + return pin; + } +private: + void turn_indirect(laddr_t interkey) { + assert(value.is_paddr()); + intermediate_base = key; + intermediate_key = (interkey == L_ADDR_NULL ? 
key : interkey); + indirect = true; + } + laddr_t key = L_ADDR_NULL; + bool indirect = false; + laddr_t intermediate_key = L_ADDR_NULL; + laddr_t intermediate_base = L_ADDR_NULL; + extent_len_t intermediate_length = 0; + pladdr_t raw_val; + lba_map_val_t map_val; }; +using BtreeLBAMappingRef = std::unique_ptr; + using LBABtree = FixedKVBtree< laddr_t, lba_map_val_t, LBAInternalNode, LBALeafNode, BtreeLBAMapping, LBA_BLOCK_SIZE, true>; @@ -89,23 +214,75 @@ public: Transaction &t, laddr_t offset) final; + alloc_extent_ret reserve_region( + Transaction &t, + laddr_t hint, + extent_len_t len) + { + return _alloc_extent( + t, + hint, + len, + P_ADDR_ZERO, + P_ADDR_NULL, + L_ADDR_NULL, + nullptr); + } + + alloc_extent_ret clone_extent( + Transaction &t, + laddr_t hint, + extent_len_t len, + laddr_t intermediate_key, + paddr_t actual_addr, + laddr_t intermediate_base) + { + return _alloc_extent( + t, + hint, + len, + intermediate_key, + actual_addr, + intermediate_base, + nullptr); + } + alloc_extent_ret alloc_extent( Transaction &t, laddr_t hint, extent_len_t len, paddr_t addr, - LogicalCachedExtent*) final; + LogicalCachedExtent &ext) final + { + return _alloc_extent( + t, + hint, + len, + addr, + P_ADDR_NULL, + L_ADDR_NULL, + &ext); + } ref_ret decref_extent( Transaction &t, - laddr_t addr) final { - return update_refcount(t, addr, -1); + laddr_t addr, + bool cascade_remove) final { + return update_refcount(t, addr, -1, cascade_remove); } ref_ret incref_extent( Transaction &t, laddr_t addr) final { - return update_refcount(t, addr, 1); + return update_refcount(t, addr, 1, false); + } + + ref_ret incref_extent( + Transaction &t, + laddr_t addr, + int delta) final { + ceph_assert(delta > 0); + return update_refcount(t, addr, delta, false); } /** @@ -170,7 +347,8 @@ private: update_refcount_ret update_refcount( Transaction &t, laddr_t addr, - int delta); + int delta, + bool cascade_remove); /** * _update_mapping @@ -187,6 +365,31 @@ private: laddr_t addr, update_func_t &&f, LogicalCachedExtent*); + + alloc_extent_ret _alloc_extent( + Transaction &t, + laddr_t hint, + extent_len_t len, + pladdr_t addr, + paddr_t actual_addr, + laddr_t intermediate_base, + LogicalCachedExtent*); + + using _get_mapping_ret = get_mapping_iertr::future; + _get_mapping_ret _get_mapping( + Transaction &t, + laddr_t offset); + + using _get_original_mappings_ret = get_mappings_ret; + _get_original_mappings_ret _get_original_mappings( + op_context_t c, + std::list &pin_list); + + ref_iertr::future>> + _decref_intermediate( + Transaction &t, + laddr_t addr, + extent_len_t len); }; using BtreeLBAManagerRef = std::unique_ptr; diff --git a/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc b/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc index a33f75917..66dc94394 100644 --- a/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc +++ b/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc @@ -20,7 +20,7 @@ namespace crimson::os::seastore::lba_manager::btree { std::ostream& operator<<(std::ostream& out, const lba_map_val_t& v) { return out << "lba_map_val_t(" - << v.paddr + << v.pladdr << "~" << v.len << ", refcount=" << v.refcount << ", checksum=" << v.checksum @@ -42,10 +42,11 @@ void LBALeafNode::resolve_relative_addrs(paddr_t base) { LOG_PREFIX(LBALeafNode::resolve_relative_addrs); for (auto i: *this) { - if (i->get_val().paddr.is_relative()) { - auto val = i->get_val(); - val.paddr = base.add_relative(val.paddr); - TRACE("{} -> {}", i->get_val().paddr, val.paddr); + 
auto val = i->get_val(); + if (val.pladdr.is_paddr() && + val.pladdr.get_paddr().is_relative()) { + val.pladdr = base.add_relative(val.pladdr.get_paddr()); + TRACE("{} -> {}", i->get_val().pladdr, val.pladdr); i->set_val(val); } } diff --git a/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h b/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h index 62ceae6cc..ffce2c1b5 100644 --- a/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h +++ b/ceph/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h @@ -33,17 +33,18 @@ using LBANode = FixedKVNode; */ struct lba_map_val_t { extent_len_t len = 0; ///< length of mapping - paddr_t paddr; ///< physical addr of mapping + pladdr_t pladdr; ///< physical addr of mapping or + // laddr of a physical lba mapping(see btree_lba_manager.h) uint32_t refcount = 0; ///< refcount uint32_t checksum = 0; ///< checksum of original block written at paddr (TODO) lba_map_val_t() = default; lba_map_val_t( extent_len_t len, - paddr_t paddr, + pladdr_t pladdr, uint32_t refcount, uint32_t checksum) - : len(len), paddr(paddr), refcount(refcount), checksum(checksum) {} + : len(len), pladdr(pladdr), refcount(refcount), checksum(checksum) {} bool operator==(const lba_map_val_t&) const = default; }; @@ -103,14 +104,14 @@ using LBAInternalNodeRef = LBAInternalNode::Ref; * size : uint32_t[1] 4b * (padding) : 4b * meta : lba_node_meta_le_t[3] (1*24)b - * keys : laddr_t[170] (145*8)b - * values : lba_map_val_t[170] (145*20)b + * keys : laddr_t[170] (140*8)b + * values : lba_map_val_t[170] (140*21)b * = 4092 * * TODO: update FixedKVNodeLayout to handle the above calculation * TODO: the above alignment probably isn't portable without further work */ -constexpr size_t LEAF_NODE_CAPACITY = 145; +constexpr size_t LEAF_NODE_CAPACITY = 140; /** * lba_map_val_le_t @@ -119,7 +120,7 @@ constexpr size_t LEAF_NODE_CAPACITY = 145; */ struct lba_map_val_le_t { extent_len_le_t len = init_extent_len_le(0); - paddr_le_t paddr; + pladdr_le_t pladdr; ceph_le32 refcount{0}; ceph_le32 checksum{0}; @@ -127,12 +128,12 @@ struct lba_map_val_le_t { lba_map_val_le_t(const lba_map_val_le_t &) = default; explicit lba_map_val_le_t(const lba_map_val_t &val) : len(init_extent_len_le(val.len)), - paddr(paddr_le_t(val.paddr)), + pladdr(pladdr_le_t(val.pladdr)), refcount(val.refcount), checksum(val.checksum) {} operator lba_map_val_t() const { - return lba_map_val_t{ len, paddr, refcount, checksum }; + return lba_map_val_t{ len, pladdr, refcount, checksum }; } }; @@ -195,7 +196,9 @@ struct LBALeafNode // child-ptr may already be correct, see LBAManager::update_mappings() this->update_child_ptr(iter, nextent); } - val.paddr = this->maybe_generate_relative(val.paddr); + if (val.pladdr.is_paddr()) { + val.pladdr = maybe_generate_relative(val.pladdr.get_paddr()); + } return this->journal_update( iter, val, @@ -214,7 +217,9 @@ struct LBALeafNode addr, (void*)nextent); this->insert_child_ptr(iter, nextent); - val.paddr = this->maybe_generate_relative(val.paddr); + if (val.pladdr.is_paddr()) { + val.pladdr = maybe_generate_relative(val.pladdr.get_paddr()); + } this->journal_insert( iter, addr, @@ -245,9 +250,10 @@ struct LBALeafNode if (this->is_initial_pending()) { for (auto i = from; i != to; ++i) { auto val = i->get_val(); - if (val.paddr.is_relative()) { - assert(val.paddr.is_block_relative()); - val.paddr = this->get_paddr().add_relative(val.paddr); + if (val.pladdr.is_paddr() + && val.pladdr.get_paddr().is_relative()) { + 
assert(val.pladdr.get_paddr().is_block_relative()); + val.pladdr = this->get_paddr().add_relative(val.pladdr.get_paddr()); i->set_val(val); } } @@ -260,10 +266,10 @@ struct LBALeafNode if (this->is_initial_pending()) { for (auto i = from; i != to; ++i) { auto val = i->get_val(); - if (val.paddr.is_relative()) { - auto val = i->get_val(); - assert(val.paddr.is_record_relative()); - val.paddr = val.paddr.block_relative_to(this->get_paddr()); + if (val.pladdr.is_paddr() + && val.pladdr.get_paddr().is_relative()) { + assert(val.pladdr.get_paddr().is_record_relative()); + val.pladdr = val.pladdr.get_paddr().block_relative_to(this->get_paddr()); i->set_val(val); } } diff --git a/ceph/src/crimson/os/seastore/object_data_handler.cc b/ceph/src/crimson/os/seastore/object_data_handler.cc index 76e179e24..0d852696b 100644 --- a/ceph/src/crimson/os/seastore/object_data_handler.cc +++ b/ceph/src/crimson/os/seastore/object_data_handler.cc @@ -25,12 +25,10 @@ using get_iertr = ObjectDataHandler::write_iertr; /** * extent_to_write_t * - * Encapsulates extents to be written out using do_insertions. + * Encapsulates the smallest write operations in an overwrite. * Indicates a zero/existing extent or a data extent based on whether * to_write is populate. - * The meaning of existing_paddr is that the new extent to be - * written is the part of exising extent on the disk. existing_paddr - * must be absolute. + * Should be handled by prepare_ops_list. */ struct extent_to_write_t { enum class type_t { @@ -38,16 +36,18 @@ struct extent_to_write_t { ZERO, EXISTING, }; - type_t type; + + /// pin of original extent, not nullptr if type == EXISTING + LBAMappingRef pin; + laddr_t addr; extent_len_t len; + /// non-nullopt if and only if type == DATA std::optional to_write; - /// non-nullopt if and only if type == EXISTING - std::optional existing_paddr; - extent_to_write_t(const extent_to_write_t &) = default; + extent_to_write_t(const extent_to_write_t &) = delete; extent_to_write_t(extent_to_write_t &&) = default; bool is_data() const { @@ -72,13 +72,14 @@ struct extent_to_write_t { } static extent_to_write_t create_zero( - laddr_t addr, extent_len_t len) { + laddr_t addr, extent_len_t len) { return extent_to_write_t(addr, len); } static extent_to_write_t create_existing( - laddr_t addr, paddr_t existing_paddr, extent_len_t len) { - return extent_to_write_t(addr, existing_paddr, len); + LBAMappingRef &&pin, laddr_t addr, extent_len_t len) { + assert(pin); + return extent_to_write_t(std::move(pin), addr, len); } private: @@ -89,12 +90,211 @@ private: extent_to_write_t(laddr_t addr, extent_len_t len) : type(type_t::ZERO), addr(addr), len(len) {} - extent_to_write_t(laddr_t addr, paddr_t existing_paddr, extent_len_t len) - : type(type_t::EXISTING), addr(addr), len(len), - to_write(std::nullopt), existing_paddr(existing_paddr) {} + extent_to_write_t(LBAMappingRef &&pin, laddr_t addr, extent_len_t len) + : type(type_t::EXISTING), pin(std::move(pin)), addr(addr), len(len) {} }; using extent_to_write_list_t = std::list; +// Encapsulates extents to be written out using do_remappings. +struct extent_to_remap_t { + enum class type_t { + REMAP, + OVERWRITE + }; + type_t type; + /// pin of original extent + LBAMappingRef pin; + /// offset of remapped extent or overwrite part of overwrite extent. + /// overwrite part of overwrite extent might correspond to multiple + /// fresh write extents. 
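+  /// For REMAP, [new_offset, new_offset + new_len) is the part of the
+  /// original pin that is kept; for OVERWRITE, it is the hole being
+  /// rewritten, and the pieces on either side of it are kept (see
+  /// create_remap_entry(), create_left_remap_entry() and
+  /// create_right_remap_entry() below).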
+ extent_len_t new_offset; + /// length of remapped extent or overwrite part of overwrite extent + extent_len_t new_len; + + extent_to_remap_t(const extent_to_remap_t &) = delete; + extent_to_remap_t(extent_to_remap_t &&) = default; + + bool is_remap() const { + return type == type_t::REMAP; + } + + bool is_overwrite() const { + assert((new_offset != 0) && (pin->get_length() != new_offset + new_len)); + return type == type_t::OVERWRITE; + } + + using remap_entry = TransactionManager::remap_entry; + remap_entry create_remap_entry() { + assert(is_remap()); + return remap_entry( + new_offset, + new_len); + } + + remap_entry create_left_remap_entry() { + assert(is_overwrite()); + return remap_entry( + 0, + new_offset); + } + + remap_entry create_right_remap_entry() { + assert(is_overwrite()); + return remap_entry( + new_offset + new_len, + pin->get_length() - new_offset - new_len); + } + + static extent_to_remap_t create_remap( + LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len) { + return extent_to_remap_t(type_t::REMAP, + std::move(pin), new_offset, new_len); + } + + static extent_to_remap_t create_overwrite( + LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len) { + return extent_to_remap_t(type_t::OVERWRITE, + std::move(pin), new_offset, new_len); + } + +private: + extent_to_remap_t(type_t type, + LBAMappingRef &&pin, extent_len_t new_offset, extent_len_t new_len) + : type(type), + pin(std::move(pin)), new_offset(new_offset), new_len(new_len) {} +}; +using extent_to_remap_list_t = std::list; + +// Encapsulates extents to be written out using do_insertions. +struct extent_to_insert_t { + enum class type_t { + DATA, + ZERO + }; + type_t type; + /// laddr of new extent + laddr_t addr; + /// length of new extent + extent_len_t len; + /// non-nullopt if type == DATA + std::optional bl; + + extent_to_insert_t(const extent_to_insert_t &) = default; + extent_to_insert_t(extent_to_insert_t &&) = default; + + bool is_data() const { + return type == type_t::DATA; + } + + bool is_zero() const { + return type == type_t::ZERO; + } + + static extent_to_insert_t create_data( + laddr_t addr, extent_len_t len, std::optional bl) { + return extent_to_insert_t(addr, len, bl); + } + + static extent_to_insert_t create_zero( + laddr_t addr, extent_len_t len) { + return extent_to_insert_t(addr, len); + } + +private: + extent_to_insert_t(laddr_t addr, extent_len_t len, + std::optional bl) + :type(type_t::DATA), addr(addr), len(len), bl(bl) {} + + extent_to_insert_t(laddr_t addr, extent_len_t len) + :type(type_t::ZERO), addr(addr), len(len) {} +}; +using extent_to_insert_list_t = std::list; + +// Encapsulates extents to be retired in do_removals. +using extent_to_remove_list_t = std::list; + +struct overwrite_ops_t { + extent_to_remap_list_t to_remap; + extent_to_insert_list_t to_insert; + extent_to_remove_list_t to_remove; +}; + +// prepare to_remap, to_retire, to_insert list +overwrite_ops_t prepare_ops_list( + lba_pin_list_t &pins_to_remove, + extent_to_write_list_t &to_write) { + assert(pins_to_remove.size() != 0); + overwrite_ops_t ops; + ops.to_remove.swap(pins_to_remove); + if (to_write.empty()) { + logger().debug("empty to_write"); + return ops; + } + long unsigned int visitted = 0; + auto& front = to_write.front(); + auto& back = to_write.back(); + + // prepare overwrite, happens in one original extent. 
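+  // i.e. both the head and the tail of to_write are EXISTING pieces of the
+  // same single pin being removed; in that case the pin is split once with
+  // an OVERWRITE remap (keeping the left and right remainders) instead of
+  // two independent REMAPs.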
+ if (ops.to_remove.size() == 1 && + front.is_existing() && back.is_existing()) { + visitted += 2; + assert(to_write.size() > 2); + assert(front.addr == front.pin->get_key()); + assert(back.addr > back.pin->get_key()); + ops.to_remap.push_back(extent_to_remap_t::create_overwrite( + std::move(front.pin), + front.len, + back.addr - front.addr - front.len)); + ops.to_remove.pop_front(); + } else { + // prepare to_remap, happens in one or multiple extents + if (front.is_existing()) { + visitted++; + assert(to_write.size() > 1); + assert(front.addr == front.pin->get_key()); + ops.to_remap.push_back(extent_to_remap_t::create_remap( + std::move(front.pin), + 0, + front.len)); + ops.to_remove.pop_front(); + } + if (back.is_existing()) { + visitted++; + assert(to_write.size() > 1); + assert(back.addr + back.len == + back.pin->get_key() + back.pin->get_length()); + ops.to_remap.push_back(extent_to_remap_t::create_remap( + std::move(back.pin), + back.addr - back.pin->get_key(), + back.len)); + ops.to_remove.pop_back(); + } + } + + // prepare to_insert + for (auto ®ion : to_write) { + if (region.is_data()) { + visitted++; + assert(region.to_write.has_value()); + ops.to_insert.push_back(extent_to_insert_t::create_data( + region.addr, region.len, region.to_write)); + } else if (region.is_zero()) { + visitted++; + assert(!(region.to_write.has_value())); + ops.to_insert.push_back(extent_to_insert_t::create_zero( + region.addr, region.len)); + } + } + + logger().debug( + "to_remap list size: {}" + " to_insert list size: {}" + " to_remove list size: {}", + ops.to_remap.size(), ops.to_insert.size(), ops.to_remove.size()); + assert(visitted == to_write.size()); + return ops; +} + /** * append_extent_to_write * @@ -134,13 +334,54 @@ void splice_extent_to_write( } } -/// Removes extents/mappings in pins +/// Creates remap extents in to_remap +ObjectDataHandler::write_ret do_remappings( + context_t ctx, + extent_to_remap_list_t &to_remap) +{ + return trans_intr::do_for_each( + to_remap, + [ctx](auto ®ion) { + if (region.is_remap()) { + return ctx.tm.remap_pin( + ctx.t, + std::move(region.pin), + std::array{ + region.create_remap_entry() + } + ).si_then([®ion](auto pins) { + ceph_assert(pins.size() == 1); + ceph_assert(region.new_len == pins[0]->get_length()); + return ObjectDataHandler::write_iertr::now(); + }); + } else if (region.is_overwrite()) { + return ctx.tm.remap_pin( + ctx.t, + std::move(region.pin), + std::array{ + region.create_left_remap_entry(), + region.create_right_remap_entry() + } + ).si_then([®ion](auto pins) { + ceph_assert(pins.size() == 2); + ceph_assert(region.pin->get_key() == pins[0]->get_key()); + ceph_assert(region.pin->get_key() + pins[0]->get_length() + + region.new_len == pins[1]->get_key()); + return ObjectDataHandler::write_iertr::now(); + }); + } else { + ceph_abort("impossible"); + return ObjectDataHandler::write_iertr::now(); + } + }); +} + ObjectDataHandler::write_ret do_removals( context_t ctx, - lba_pin_list_t &pins) + lba_pin_list_t &to_remove) { return trans_intr::do_for_each( - pins, + to_remove, [ctx](auto &pin) { LOG_PREFIX(object_data_handler.cc::do_removals); DEBUGT("decreasing ref: {}", @@ -159,19 +400,19 @@ ObjectDataHandler::write_ret do_removals( }); } -/// Creates zero/data extents in to_write +/// Creates zero/data extents in to_insert ObjectDataHandler::write_ret do_insertions( context_t ctx, - extent_to_write_list_t &to_write) + extent_to_insert_list_t &to_insert) { return trans_intr::do_for_each( - to_write, + to_insert, [ctx](auto ®ion) { 
LOG_PREFIX(object_data_handler.cc::do_insertions); if (region.is_data()) { assert_aligned(region.addr); assert_aligned(region.len); - ceph_assert(region.len == region.to_write->length()); + ceph_assert(region.len == region.bl->length()); DEBUGT("allocating extent: {}~{}", ctx.t, region.addr, @@ -190,7 +431,7 @@ ObjectDataHandler::write_ret do_insertions( } ceph_assert(extent->get_laddr() == region.addr); ceph_assert(extent->get_length() == region.len); - auto iter = region.to_write->cbegin(); + auto iter = region.bl->cbegin(); iter.copy(region.len, extent->get_bptr().c_str()); return ObjectDataHandler::write_iertr::now(); }); @@ -216,25 +457,8 @@ ObjectDataHandler::write_ret do_insertions( return ObjectDataHandler::write_iertr::now(); }); } else { - ceph_assert(region.is_existing()); - DEBUGT("map existing extent: laddr {} len {} {}", - ctx.t, region.addr, region.len, *region.existing_paddr); - return ctx.tm.map_existing_extent( - ctx.t, region.addr, *region.existing_paddr, region.len - ).handle_error_interruptible( - TransactionManager::alloc_extent_iertr::pass_further{}, - Device::read_ertr::assert_all{"ignore read error"} - ).si_then([FNAME, ctx, ®ion](auto extent) { - if (extent->get_laddr() != region.addr) { - ERRORT( - "inconsistent laddr: extent: {} region {}", - ctx.t, - extent->get_laddr(), - region.addr); - } - ceph_assert(extent->get_laddr() == region.addr); - return ObjectDataHandler::write_iertr::now(); - }); + ceph_abort("impossible"); + return ObjectDataHandler::write_iertr::now(); } }); } @@ -352,7 +576,8 @@ public: overwrite_plan_t(laddr_t offset, extent_len_t len, const lba_pin_list_t& pins, - extent_len_t block_size) : + extent_len_t block_size, + Transaction& t) : pin_begin(pins.front()->get_key()), pin_end(pins.back()->get_key() + pins.back()->get_length()), left_paddr(pins.front()->get_val()), @@ -365,7 +590,7 @@ public: right_operation(overwrite_operation_t::UNKNOWN), block_size(block_size) { validate(); - evaluate_operations(); + evaluate_operations(t); assert(left_operation != overwrite_operation_t::UNKNOWN); assert(right_operation != overwrite_operation_t::UNKNOWN); } @@ -393,19 +618,31 @@ private: * original extent into at most three parts: origin-left, part-to-be-modified * and origin-right. 
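+   * Note: with the Transaction argument, a bordering extent can now also be
+   * merged into the write when it is already present in the transaction and
+   * mutable, in addition to the previous relative/delayed-paddr case (see
+   * the can_merge lambda below).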
*/ - void evaluate_operations() { + void evaluate_operations(Transaction& t) { auto actual_write_size = get_pins_size(); auto aligned_data_size = get_aligned_data_size(); auto left_ext_size = get_left_extent_size(); auto right_ext_size = get_right_extent_size(); + auto can_merge = [](Transaction& t, paddr_t paddr) { + CachedExtentRef ext; + if (paddr.is_relative() || paddr.is_delayed()) { + return true; + } else if (t.get_extent(paddr, &ext) == + Transaction::get_extent_ret::PRESENT) { + // FIXME: there is no need to lookup the cache if the pin can + // be associated with the extent state + if (ext->is_mutable()) { + return true; + } + } + return false; + }; if (left_paddr.is_zero()) { actual_write_size -= left_ext_size; left_ext_size = 0; left_operation = overwrite_operation_t::OVERWRITE_ZERO; - // FIXME: left_paddr can be absolute and pending - } else if (left_paddr.is_relative() || - left_paddr.is_delayed()) { + } else if (can_merge(t, left_paddr)) { aligned_data_size += left_ext_size; left_ext_size = 0; left_operation = overwrite_operation_t::MERGE_EXISTING; @@ -415,9 +652,7 @@ private: actual_write_size -= right_ext_size; right_ext_size = 0; right_operation = overwrite_operation_t::OVERWRITE_ZERO; - // FIXME: right_paddr can be absolute and pending - } else if (right_paddr.is_relative() || - right_paddr.is_delayed()) { + } else if (can_merge(t, right_paddr)) { aligned_data_size += right_ext_size; right_ext_size = 0; right_operation = overwrite_operation_t::MERGE_EXISTING; @@ -506,14 +741,15 @@ operate_ret operate_left(context_t ctx, LBAMappingRef &pin, const overwrite_plan std::nullopt, std::nullopt); } else { + extent_len_t off = pin->get_intermediate_offset(); return ctx.tm.read_pin( ctx.t, pin->duplicate() - ).si_then([prepend_len](auto left_extent) { + ).si_then([prepend_len, off](auto left_extent) { return get_iertr::make_ready_future( std::nullopt, std::make_optional(bufferptr( left_extent->get_bptr(), - 0, + off, prepend_len))); }); } @@ -524,23 +760,24 @@ operate_ret operate_left(context_t ctx, LBAMappingRef &pin, const overwrite_plan assert(extent_len); std::optional left_to_write_extent = std::make_optional(extent_to_write_t::create_existing( - overwrite_plan.pin_begin, - overwrite_plan.left_paddr, + pin->duplicate(), + pin->get_key(), extent_len)); auto prepend_len = overwrite_plan.get_left_alignment_size(); if (prepend_len == 0) { return get_iertr::make_ready_future( - left_to_write_extent, + std::move(left_to_write_extent), std::nullopt); } else { + extent_len_t off = pin->get_intermediate_offset(); return ctx.tm.read_pin( ctx.t, pin->duplicate() - ).si_then([prepend_offset=extent_len, prepend_len, + ).si_then([prepend_offset=extent_len + off, prepend_len, left_to_write_extent=std::move(left_to_write_extent)] (auto left_extent) mutable { return get_iertr::make_ready_future( - left_to_write_extent, + std::move(left_to_write_extent), std::make_optional(bufferptr( left_extent->get_bptr(), prepend_offset, @@ -587,7 +824,10 @@ operate_ret operate_right(context_t ctx, LBAMappingRef &pin, const overwrite_pla std::nullopt, std::nullopt); } else { - auto append_offset = overwrite_plan.data_end - right_pin_begin; + auto append_offset = + overwrite_plan.data_end + - right_pin_begin + + pin->get_intermediate_offset(); return ctx.tm.read_pin( ctx.t, pin->duplicate() ).si_then([append_offset, append_len](auto right_extent) { @@ -606,24 +846,27 @@ operate_ret operate_right(context_t ctx, LBAMappingRef &pin, const overwrite_pla assert(extent_len); std::optional right_to_write_extent = 
std::make_optional(extent_to_write_t::create_existing( + pin->duplicate(), overwrite_plan.aligned_data_end, - overwrite_plan.right_paddr.add_offset(overwrite_plan.aligned_data_end - right_pin_begin), extent_len)); auto append_len = overwrite_plan.get_right_alignment_size(); if (append_len == 0) { return get_iertr::make_ready_future( - right_to_write_extent, + std::move(right_to_write_extent), std::nullopt); } else { - auto append_offset = overwrite_plan.data_end - right_pin_begin; + auto append_offset = + overwrite_plan.data_end + - right_pin_begin + + pin->get_intermediate_offset(); return ctx.tm.read_pin( ctx.t, pin->duplicate() ).si_then([append_offset, append_len, right_to_write_extent=std::move(right_to_write_extent)] (auto right_extent) mutable { return get_iertr::make_ready_future( - right_to_write_extent, + std::move(right_to_write_extent), std::make_optional(bufferptr( right_extent->get_bptr(), append_offset, @@ -652,6 +895,31 @@ auto with_object_data( }); } +template +auto with_objects_data( + ObjectDataHandler::context_t ctx, + F &&f) +{ + ceph_assert(ctx.d_onode); + return seastar::do_with( + ctx.onode.get_layout().object_data.get(), + ctx.d_onode->get_layout().object_data.get(), + std::forward(f), + [ctx](auto &object_data, auto &d_object_data, auto &f) { + return std::invoke(f, object_data, d_object_data + ).si_then([ctx, &object_data, &d_object_data] { + if (object_data.must_update()) { + ctx.onode.get_mutable_layout(ctx.t).object_data.update(object_data); + } + if (d_object_data.must_update()) { + ctx.d_onode->get_mutable_layout( + ctx.t).object_data.update(d_object_data); + } + return seastar::now(); + }); + }); +} + ObjectDataHandler::write_ret ObjectDataHandler::prepare_data_reservation( context_t ctx, object_data_t &object_data, @@ -706,6 +974,11 @@ ObjectDataHandler::clear_ret ObjectDataHandler::trim_data_reservation( ).si_then([ctx, size, &pins, &object_data, &to_write](auto _pins) { _pins.swap(pins); ceph_assert(pins.size()); + if (!size) { + // no need to reserve region if we are truncating the object's + // size to 0 + return clear_iertr::now(); + } auto &pin = *pins.front(); ceph_assert(pin.get_key() >= object_data.get_reserved_data_base()); ceph_assert( @@ -721,41 +994,67 @@ ObjectDataHandler::clear_ret ObjectDataHandler::trim_data_reservation( object_data.get_reserved_data_len() - pin_offset)); return clear_iertr::now(); } else { - /* First pin overlaps the boundary and has data, read in extent - * and rewrite portion prior to size */ - return ctx.tm.read_pin( - ctx.t, - pin.duplicate() - ).si_then([ctx, size, pin_offset, &pin, &object_data, &to_write]( - auto extent) { - bufferlist bl; - bl.append( - bufferptr( - extent->get_bptr(), - 0, - size - pin_offset - )); - bl.append_zero(p2roundup(size, ctx.tm.get_block_size()) - size); - to_write.push_back(extent_to_write_t::create_data( - pin.get_key(), - bl)); + /* First pin overlaps the boundary and has data, remap it + * if aligned or rewrite it if not aligned to size */ + auto roundup_size = p2roundup(size, ctx.tm.get_block_size()); + auto append_len = roundup_size - size; + if (append_len == 0) { + LOG_PREFIX(ObjectDataHandler::trim_data_reservation); + TRACET("First pin overlaps the boundary and has aligned data" + "create existing at addr:{}, len:{}", + ctx.t, pin.get_key(), size - pin_offset); + to_write.push_back(extent_to_write_t::create_existing( + pin.duplicate(), + pin.get_key(), + size - pin_offset)); to_write.push_back(extent_to_write_t::create_zero( - object_data.get_reserved_data_base() + - 
p2roundup(size, ctx.tm.get_block_size()), - object_data.get_reserved_data_len() - - p2roundup(size, ctx.tm.get_block_size()))); - return clear_iertr::now(); - }); - } - }).si_then([ctx, &pins] { - return do_removals(ctx, pins); - }).si_then([ctx, &to_write] { - return do_insertions(ctx, to_write); - }).si_then([size, &object_data] { - if (size == 0) { - object_data.clear(); + object_data.get_reserved_data_base() + roundup_size, + object_data.get_reserved_data_len() - roundup_size)); + return clear_iertr::now(); + } else { + return ctx.tm.read_pin( + ctx.t, + pin.duplicate() + ).si_then([ctx, size, pin_offset, append_len, roundup_size, + &pin, &object_data, &to_write](auto extent) { + bufferlist bl; + bl.append( + bufferptr( + extent->get_bptr(), + pin.get_intermediate_offset(), + size - pin_offset + )); + bl.append_zero(append_len); + LOG_PREFIX(ObjectDataHandler::trim_data_reservation); + TRACET("First pin overlaps the boundary and has unaligned data" + "create data at addr:{}, len:{}", + ctx.t, pin.get_key(), bl.length()); + to_write.push_back(extent_to_write_t::create_data( + pin.get_key(), + bl)); + to_write.push_back(extent_to_write_t::create_zero( + object_data.get_reserved_data_base() + roundup_size, + object_data.get_reserved_data_len() - roundup_size)); + return clear_iertr::now(); + }); + } } - return ObjectDataHandler::clear_iertr::now(); + }).si_then([ctx, size, &to_write, &object_data, &pins] { + return seastar::do_with( + prepare_ops_list(pins, to_write), + [ctx, size, &object_data](auto &ops) { + return do_remappings(ctx, ops.to_remap + ).si_then([ctx, &ops] { + return do_removals(ctx, ops.to_remove); + }).si_then([ctx, &ops] { + return do_insertions(ctx, ops.to_insert); + }).si_then([size, &object_data] { + if (size == 0) { + object_data.clear(); + } + return ObjectDataHandler::clear_iertr::now(); + }); + }); }); }); } @@ -806,7 +1105,9 @@ extent_to_write_list_t get_to_writes_with_zero_buffer( } assert(bl.length() % block_size == 0); assert(bl.length() == (right - left)); - return {extent_to_write_t::create_data(left, bl)}; + extent_to_write_list_t ret; + ret.push_back(extent_to_write_t::create_data(left, bl)); + return ret; } else { // reserved section between ends, headptr and tailptr in different extents extent_to_write_list_t ret; @@ -858,7 +1159,7 @@ ObjectDataHandler::write_ret ObjectDataHandler::overwrite( if (bl.has_value()) { assert(bl->length() == len); } - overwrite_plan_t overwrite_plan(offset, len, _pins, ctx.tm.get_block_size()); + overwrite_plan_t overwrite_plan(offset, len, _pins, ctx.tm.get_block_size(), ctx.t); return seastar::do_with( std::move(_pins), extent_to_write_list_t(), @@ -931,9 +1232,16 @@ ObjectDataHandler::write_ret ObjectDataHandler::overwrite( assert(pin_begin == to_write.front().addr); assert(pin_end == to_write.back().get_end_addr()); - return do_removals(ctx, pins); - }).si_then([ctx, &to_write] { - return do_insertions(ctx, to_write); + return seastar::do_with( + prepare_ops_list(pins, to_write), + [ctx](auto &ops) { + return do_remappings(ctx, ops.to_remap + ).si_then([ctx, &ops] { + return do_removals(ctx, ops.to_remove); + }).si_then([ctx, &ops] { + return do_insertions(ctx, ops.to_insert); + }); + }); }); }); }); @@ -1061,17 +1369,33 @@ ObjectDataHandler::read_ret ObjectDataHandler::read( current = end; return seastar::now(); } else { + LOG_PREFIX(ObjectDataHandler::read); + auto key = pin->get_key(); + bool is_indirect = pin->is_indirect(); + extent_len_t off = pin->get_intermediate_offset(); + DEBUGT("reading {}~{}, indirect: {}, " 
+ "intermediate offset: {}, current: {}, end: {}", + ctx.t, + key, + pin->get_length(), + is_indirect, + off, + current, + end); return ctx.tm.read_pin( ctx.t, std::move(pin) - ).si_then([&ret, ¤t, end](auto extent) { + ).si_then([&ret, ¤t, end, key, off, + is_indirect](auto extent) { ceph_assert( - (extent->get_laddr() + extent->get_length()) >= end); + is_indirect + ? (key - off + extent->get_length()) >= end + : (extent->get_laddr() + extent->get_length()) >= end); ceph_assert(end > current); ret.append( bufferptr( extent->get_bptr(), - current - extent->get_laddr(), + off + current - (is_indirect ? key : extent->get_laddr()), end - current)); current = end; return seastar::now(); @@ -1189,4 +1513,126 @@ ObjectDataHandler::clear_ret ObjectDataHandler::clear( }); } +ObjectDataHandler::clone_ret ObjectDataHandler::clone_extents( + context_t ctx, + object_data_t &object_data, + lba_pin_list_t &pins, + laddr_t data_base) +{ + LOG_PREFIX(ObjectDataHandler::clone_extents); + TRACET(" object_data: {}~{}, data_base: {}", + ctx.t, + object_data.get_reserved_data_base(), + object_data.get_reserved_data_len(), + data_base); + return ctx.tm.dec_ref( + ctx.t, + object_data.get_reserved_data_base() + ).si_then( + [&pins, &object_data, ctx, data_base](auto) mutable { + return seastar::do_with( + (extent_len_t)0, + [&object_data, ctx, data_base, &pins](auto &last_pos) { + return trans_intr::do_for_each( + pins, + [&last_pos, &object_data, ctx, data_base](auto &pin) { + auto offset = pin->get_key() - data_base; + ceph_assert(offset == last_pos); + auto fut = TransactionManager::alloc_extent_iertr + ::make_ready_future(); + auto addr = object_data.get_reserved_data_base() + offset; + if (pin->get_val().is_zero()) { + fut = ctx.tm.reserve_region(ctx.t, addr, pin->get_length()); + } else { + fut = ctx.tm.clone_pin(ctx.t, addr, *pin); + } + return fut.si_then( + [&pin, &last_pos, offset](auto) { + last_pos = offset + pin->get_length(); + return seastar::now(); + }).handle_error_interruptible( + crimson::ct_error::input_output_error::pass_further(), + crimson::ct_error::assert_all("not possible") + ); + }).si_then([&last_pos, &object_data, ctx] { + if (last_pos != object_data.get_reserved_data_len()) { + return ctx.tm.reserve_region( + ctx.t, + object_data.get_reserved_data_base() + last_pos, + object_data.get_reserved_data_len() - last_pos + ).si_then([](auto) { + return seastar::now(); + }); + } + return TransactionManager::reserve_extent_iertr::now(); + }); + }); + }, + ObjectDataHandler::write_iertr::pass_further{}, + crimson::ct_error::assert_all{ + "object_data_handler::clone invalid error" + } + ); +} + +ObjectDataHandler::clone_ret ObjectDataHandler::clone( + context_t ctx) +{ + // the whole clone procedure can be seperated into the following steps: + // 1. let clone onode(d_object_data) take the head onode's + // object data base; + // 2. reserve a new region in lba tree for the head onode; + // 3. clone all extents of the clone onode, see transaction_manager.h + // for the details of clone_pin; + // 4. reserve the space between the head onode's size and its reservation + // length. 
+ return with_objects_data( + ctx, + [ctx, this](auto &object_data, auto &d_object_data) { + ceph_assert(d_object_data.is_null()); + if (object_data.is_null()) { + return clone_iertr::now(); + } + return prepare_data_reservation( + ctx, + d_object_data, + object_data.get_reserved_data_len() + ).si_then([&object_data, &d_object_data, ctx, this] { + assert(!object_data.is_null()); + auto base = object_data.get_reserved_data_base(); + auto len = object_data.get_reserved_data_len(); + object_data.clear(); + LOG_PREFIX(ObjectDataHandler::clone); + DEBUGT("cloned obj reserve_data_base: {}, len {}", + ctx.t, + d_object_data.get_reserved_data_base(), + d_object_data.get_reserved_data_len()); + return prepare_data_reservation( + ctx, + object_data, + d_object_data.get_reserved_data_len() + ).si_then([&d_object_data, ctx, &object_data, base, len, this] { + LOG_PREFIX("ObjectDataHandler::clone"); + DEBUGT("head obj reserve_data_base: {}, len {}", + ctx.t, + object_data.get_reserved_data_base(), + object_data.get_reserved_data_len()); + return ctx.tm.get_pins(ctx.t, base, len + ).si_then([ctx, &object_data, &d_object_data, base, this](auto pins) { + return seastar::do_with( + std::move(pins), + [ctx, &object_data, &d_object_data, base, this](auto &pins) { + return clone_extents(ctx, object_data, pins, base + ).si_then([ctx, &d_object_data, base, &pins, this] { + return clone_extents(ctx, d_object_data, pins, base); + }).si_then([&pins, ctx] { + return do_removals(ctx, pins); + }); + }); + }); + }); + }); + }); +} + } // namespace crimson::os::seastore diff --git a/ceph/src/crimson/os/seastore/object_data_handler.h b/ceph/src/crimson/os/seastore/object_data_handler.h index 6fd73dc76..b5f432d5a 100644 --- a/ceph/src/crimson/os/seastore/object_data_handler.h +++ b/ceph/src/crimson/os/seastore/object_data_handler.h @@ -19,10 +19,12 @@ namespace crimson::os::seastore { struct ObjectDataBlock : crimson::os::seastore::LogicalCachedExtent { using Ref = TCachedExtentRef; - ObjectDataBlock(ceph::bufferptr &&ptr) + explicit ObjectDataBlock(ceph::bufferptr &&ptr) : LogicalCachedExtent(std::move(ptr)) {} - ObjectDataBlock(const ObjectDataBlock &other) + explicit ObjectDataBlock(const ObjectDataBlock &other) : LogicalCachedExtent(other) {} + explicit ObjectDataBlock(extent_len_t length) + : LogicalCachedExtent(length) {} CachedExtentRef duplicate_for_write(Transaction&) final { return CachedExtentRef(new ObjectDataBlock(*this)); @@ -56,6 +58,7 @@ public: TransactionManager &tm; Transaction &t; Onode &onode; + Onode *d_onode = nullptr; // The destination onode in case of clone }; /// Writes bl to [offset, offset + bl.length()) @@ -101,6 +104,11 @@ public: using clear_ret = clear_iertr::future<>; clear_ret clear(context_t ctx); + /// Clone data of an Onode + using clone_iertr = base_iertr; + using clone_ret = clone_iertr::future<>; + clone_ret clone(context_t ctx); + private: /// Updates region [_offset, _offset + bl.length) to bl write_ret overwrite( @@ -122,6 +130,13 @@ private: context_t ctx, object_data_t &object_data, extent_len_t size); + + clone_ret clone_extents( + context_t ctx, + object_data_t &object_data, + lba_pin_list_t &pins, + laddr_t data_base); + private: /** * max_object_size diff --git a/ceph/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.cc b/ceph/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.cc index ee22b00b7..4db58414a 100644 --- a/ceph/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.cc +++ 
b/ceph/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.cc @@ -235,11 +235,12 @@ OMapInnerNode::list( return trans_intr::repeat( [&, config, oc, this]() -> list_iertr::future { - if (iter == liter || result.size() == config.max_result_size) { - complete = iter == liter; + if (iter == liter) { + complete = true; return list_iertr::make_ready_future( seastar::stop_iteration::yes); } + assert(result.size() < config.max_result_size); auto laddr = iter->get_val(); return omap_load_extent( oc, laddr, @@ -278,8 +279,12 @@ OMapInnerNode::list( } } result.merge(std::move(child_result)); + if (result.size() == config.max_result_size) { + return list_iertr::make_ready_future( + seastar::stop_iteration::yes); + } ++iter; - assert(child_complete || result.size() == config.max_result_size); + assert(child_complete); return list_iertr::make_ready_future( seastar::stop_iteration::no); }); diff --git a/ceph/src/crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h b/ceph/src/crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h index 7d449d018..72b13fedf 100644 --- a/ceph/src/crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h +++ b/ceph/src/crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h @@ -1,4 +1,4 @@ -// -*- mode:C++; tab-width:8; c-basic-index:2; indent-tabs-mode:t -*- +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once diff --git a/ceph/src/crimson/os/seastore/onode.h b/ceph/src/crimson/os/seastore/onode.h index 952dd9cca..069daa3df 100644 --- a/ceph/src/crimson/os/seastore/onode.h +++ b/ceph/src/crimson/os/seastore/onode.h @@ -62,6 +62,7 @@ public: default_metadata_range(dmr) {} + virtual bool is_alive() const = 0; virtual const onode_layout_t &get_layout() const = 0; virtual onode_layout_t &get_mutable_layout(Transaction &t) = 0; virtual ~Onode() = default; diff --git a/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.cc b/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.cc index 8d75f9fa8..bff27ab65 100644 --- a/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.cc +++ b/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.cc @@ -93,16 +93,19 @@ FLTreeOnodeManager::write_dirty_ret FLTreeOnodeManager::write_dirty( { return trans_intr::do_for_each( onodes, - [this, &trans](auto &onode) -> eagain_ifuture<> { + [&trans](auto &onode) -> eagain_ifuture<> { + if (!onode) { + return eagain_iertr::make_ready_future<>(); + } auto &flonode = static_cast(*onode); + if (!flonode.is_alive()) { + return eagain_iertr::make_ready_future<>(); + } switch (flonode.status) { case FLTreeOnode::status_t::MUTATED: { flonode.populate_recorder(trans); return eagain_iertr::make_ready_future<>(); } - case FLTreeOnode::status_t::DELETED: { - return tree.erase(trans, flonode); - } case FLTreeOnode::status_t::STABLE: { return eagain_iertr::make_ready_future<>(); } @@ -117,8 +120,12 @@ FLTreeOnodeManager::erase_onode_ret FLTreeOnodeManager::erase_onode( OnodeRef &onode) { auto &flonode = static_cast(*onode); + assert(flonode.is_alive()); + if (flonode.status == FLTreeOnode::status_t::MUTATED) { + flonode.populate_recorder(trans); + } flonode.mark_delete(); - return erase_onode_iertr::now(); + return tree.erase(trans, flonode); } FLTreeOnodeManager::list_onodes_ret FLTreeOnodeManager::list_onodes( diff --git a/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h 
b/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h index 0367b823f..09998fbfa 100644 --- a/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h +++ b/ceph/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h @@ -70,6 +70,9 @@ struct FLTreeOnode final : Onode, Value { } }; + bool is_alive() const { + return status != status_t::DELETED; + } const onode_layout_t &get_layout() const final { assert(status != status_t::DELETED); return *read_payload(); diff --git a/ceph/src/crimson/os/seastore/random_block_manager.h b/ceph/src/crimson/os/seastore/random_block_manager.h index 0478f5d0e..d9be1b5e6 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager.h +++ b/ceph/src/crimson/os/seastore/random_block_manager.h @@ -22,6 +22,18 @@ namespace crimson::os::seastore { +struct rbm_shard_info_t { + std::size_t size = 0; + uint64_t start_offset = 0; + + DENC(rbm_shard_info_t, v, p) { + DENC_START(1, 1, p); + denc(v.size, p); + denc(v.start_offset, p); + DENC_FINISH(p); + } +}; + struct rbm_metadata_header_t { size_t size = 0; size_t block_size = 0; @@ -29,6 +41,8 @@ struct rbm_metadata_header_t { uint64_t journal_size = 0; checksum_t crc = 0; device_config_t config; + unsigned int shard_num = 0; + std::vector shard_infos; DENC(rbm_metadata_header_t, v, p) { DENC_START(1, 1, p); @@ -39,9 +53,28 @@ struct rbm_metadata_header_t { denc(v.journal_size, p); denc(v.crc, p); denc(v.config, p); + denc(v.shard_num, p); + denc(v.shard_infos, p); DENC_FINISH(p); } + void validate() const { + ceph_assert(shard_num == seastar::smp::count); + ceph_assert(block_size > 0); + for (unsigned int i = 0; i < seastar::smp::count; i ++) { + ceph_assert(shard_infos[i].size > block_size && + shard_infos[i].size % block_size == 0); + ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX); + ceph_assert(journal_size > 0 && + journal_size % block_size == 0); + ceph_assert(shard_infos[i].start_offset < size && + shard_infos[i].start_offset % block_size == 0); + } + ceph_assert(config.spec.magic != 0); + ceph_assert(get_default_backend_of_device(config.spec.dtype) == + backend_type_t::RANDOM_BLOCK); + ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID); + } }; enum class rbm_extent_state_t { @@ -127,12 +160,17 @@ seastar::future> get_rb_device(const std::string &device); std::ostream &operator<<(std::ostream &out, const rbm_metadata_header_t &header); +std::ostream &operator<<(std::ostream &out, const rbm_shard_info_t &shard); } +WRITE_CLASS_DENC_BOUNDED( + crimson::os::seastore::rbm_shard_info_t +) WRITE_CLASS_DENC_BOUNDED( crimson::os::seastore::rbm_metadata_header_t ) #if FMT_VERSION >= 90000 template<> struct fmt::formatter : fmt::ostream_formatter {}; +template<> struct fmt::formatter : fmt::ostream_formatter {}; #endif diff --git a/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.cc b/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.cc index d7dacf4f5..511b70a2e 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.cc +++ b/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.cc @@ -78,7 +78,7 @@ BlockRBManager::open_ertr::future<> BlockRBManager::open() auto ool_start = get_start_rbm_addr(); allocator->init( ool_start, - device->get_available_size() - + device->get_shard_end() - ool_start, device->get_block_size()); return open_ertr::now(); @@ -91,8 +91,8 @@ BlockRBManager::write_ertr::future<> BlockRBManager::write( LOG_PREFIX(BlockRBManager::write); 
ceph_assert(device); rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr); - rbm_abs_addr start = 0; - rbm_abs_addr end = device->get_available_size(); + rbm_abs_addr start = device->get_shard_start(); + rbm_abs_addr end = device->get_shard_end(); if (addr < start || addr + bptr.length() > end) { ERROR("out of range: start {}, end {}, addr {}, length {}", start, end, addr, bptr.length()); @@ -112,8 +112,8 @@ BlockRBManager::read_ertr::future<> BlockRBManager::read( LOG_PREFIX(BlockRBManager::read); ceph_assert(device); rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr); - rbm_abs_addr start = 0; - rbm_abs_addr end = device->get_available_size(); + rbm_abs_addr start = device->get_shard_start(); + rbm_abs_addr end = device->get_shard_end(); if (addr < start || addr + bptr.length() > end) { ERROR("out of range: start {}, end {}, addr {}, length {}", start, end, addr, bptr.length()); @@ -158,7 +158,18 @@ std::ostream &operator<<(std::ostream &out, const rbm_metadata_header_t &header) << ", feature=" << header.feature << ", journal_size=" << header.journal_size << ", crc=" << header.crc - << ", config=" << header.config; + << ", config=" << header.config + << ", shard_num=" << header.shard_num; + for (auto p : header.shard_infos) { + out << p; + } + return out << ")"; +} + +std::ostream &operator<<(std::ostream &out, const rbm_shard_info_t &shard) +{ + out << " rbm_shard_info_t(size=" << shard.size + << ", start_offset=" << shard.start_offset; return out << ")"; } diff --git a/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.h b/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.h index 5db46b237..b686820d0 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.h +++ b/ceph/src/crimson/os/seastore/random_block_manager/block_rb_manager.h @@ -62,10 +62,10 @@ public: void complete_allocation(paddr_t addr, size_t size) final; size_t get_start_rbm_addr() const { - return device->get_journal_start() + device->get_journal_size(); + return device->get_shard_journal_start() + device->get_journal_size(); } size_t get_size() const final { - return device->get_available_size() - get_start_rbm_addr(); + return device->get_shard_end() - get_start_rbm_addr(); }; extent_len_t get_block_size() const final { return device->get_block_size(); } @@ -97,7 +97,7 @@ public: assert(allocator); rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr); assert(addr >= get_start_rbm_addr() && - addr + len <= device->get_available_size()); + addr + len <= device->get_shard_end()); allocator->mark_extent_used(addr, len); } @@ -105,7 +105,7 @@ public: assert(allocator); rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr); assert(addr >= get_start_rbm_addr() && - addr + len <= device->get_available_size()); + addr + len <= device->get_shard_end()); allocator->free_extent(addr, len); } @@ -119,7 +119,7 @@ public: assert(allocator); rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr); assert(addr >= get_start_rbm_addr() && - addr + size <= device->get_available_size()); + addr + size <= device->get_shard_end()); return allocator->get_extent_state(addr, size); } diff --git a/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.cc b/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.cc index 44a8c1041..6437f06a4 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.cc +++ b/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.cc @@ -22,13 +22,21 @@ namespace { namespace 
crimson::os::seastore::random_block_device::nvme { +NVMeBlockDevice::mkfs_ret NVMeBlockDevice::mkfs(device_config_t config) { + using crimson::common::get_conf; + return shard_devices.local().do_primary_mkfs(config, + seastar::smp::count, + get_conf("seastore_cbjournal_size") + ); +} + open_ertr::future<> NVMeBlockDevice::open( const std::string &in_path, seastar::open_flags mode) { return seastar::do_with(in_path, [this, mode](auto& in_path) { return seastar::file_stat(in_path).then([this, mode, in_path](auto stat) { return seastar::open_file_dma(in_path, mode).then([=, this](auto file) { - device = file; + device = std::move(file); logger().debug("open"); // Get SSD's features from identify_controller and namespace command. // Do identify_controller first, and then identify_namespace. @@ -68,7 +76,7 @@ open_ertr::future<> NVMeBlockDevice::open_for_io( return seastar::open_file_dma(in_path, mode).then([this]( auto file) { assert(io_device.size() > stream_index_to_open); - io_device[stream_index_to_open] = file; + io_device[stream_index_to_open] = std::move(file); return io_device[stream_index_to_open].fcntl( F_SET_FILE_RW_HINT, (uintptr_t)&stream_index_to_open).then([this](auto ret) { @@ -82,7 +90,13 @@ open_ertr::future<> NVMeBlockDevice::open_for_io( NVMeBlockDevice::mount_ret NVMeBlockDevice::mount() { logger().debug(" mount "); - return do_mount(); + return shard_devices.invoke_on_all([](auto &local_device) { + return local_device.do_shard_mount( + ).handle_error( + crimson::ct_error::assert_all{ + "Invalid error in RBMDevice::do_mount" + }); + }); } write_ertr::future<> NVMeBlockDevice::write( diff --git a/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.h b/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.h index 4dc4de533..ed8f99be8 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.h +++ b/ceph/src/crimson/os/seastore/random_block_manager/nvme_block_device.h @@ -209,11 +209,7 @@ public: mount_ret mount() final; - mkfs_ret mkfs(device_config_t config) final { - using crimson::common::get_conf; - super.journal_size = get_conf("seastore_cbjournal_size"); - return do_mkfs(config); - } + mkfs_ret mkfs(device_config_t config) final; write_ertr::future<> writev( uint64_t offset, @@ -267,6 +263,18 @@ public: return device_path; } + seastar::future<> start() final { + return shard_devices.start(device_path); + } + + seastar::future<> stop() final { + return shard_devices.stop(); + } + + Device& get_sharded_device() final { + return shard_devices.local(); + } + uint64_t get_preffered_write_granularity() const { return write_granularity; } uint64_t get_preffered_write_alignment() const { return write_alignment; } uint64_t get_atomic_write_unit() const { return atomic_write_unit; } @@ -346,6 +354,7 @@ private: bool data_protection_enabled = false; std::string device_path; + seastar::sharded shard_devices; }; } diff --git a/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.cc b/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.cc index 138611438..cea6c30a7 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.cc +++ b/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.cc @@ -18,28 +18,46 @@ namespace crimson::os::seastore::random_block_device { #include "crimson/os/seastore/logging.h" SET_SUBSYS(seastore_device); -RBMDevice::mkfs_ret RBMDevice::do_mkfs(device_config_t config) { - LOG_PREFIX(RBMDevice::mkfs); +RBMDevice::mkfs_ret RBMDevice::do_primary_mkfs(device_config_t 
config, + int shard_num, size_t journal_size) { + LOG_PREFIX(RBMDevice::do_primary_mkfs); return stat_device( ).handle_error( mkfs_ertr::pass_further{}, crimson::ct_error::assert_all{ - "Invalid error stat_device in RBMDevice::mkfs"} - ).safe_then([this, FNAME, config=std::move(config)](auto st) { + "Invalid error stat_device in RBMDevice::do_primary_mkfs"} + ).safe_then( + [this, FNAME, config=std::move(config), shard_num, journal_size](auto st) { super.block_size = st.block_size; super.size = st.size; super.feature |= RBM_BITMAP_BLOCK_CRC; super.config = std::move(config); - assert(super.journal_size); - assert(super.size >= super.journal_size); + super.journal_size = journal_size; + ceph_assert_always(super.journal_size > 0); + ceph_assert_always(super.size >= super.journal_size); + ceph_assert_always(shard_num > 0); + + std::vector shard_infos(shard_num); + for (int i = 0; i < shard_num; i++) { + uint64_t aligned_size = + (super.size / shard_num) - + ((super.size / shard_num) % super.block_size); + shard_infos[i].size = aligned_size; + shard_infos[i].start_offset = i * aligned_size; + assert(shard_infos[i].size > super.journal_size); + } + super.shard_infos = shard_infos; + super.shard_num = shard_num; + shard_info = shard_infos[seastar::this_shard_id()]; DEBUG("super {} ", super); + // write super block return open(get_device_path(), seastar::open_flags::rw | seastar::open_flags::dsync ).handle_error( mkfs_ertr::pass_further{}, crimson::ct_error::assert_all{ - "Invalid error open in RBMDevice::mkfs"} + "Invalid error open in RBMDevice::do_primary_mkfs"} ).safe_then([this] { return write_rbm_header( ).safe_then([this] { @@ -47,7 +65,7 @@ RBMDevice::mkfs_ret RBMDevice::do_mkfs(device_config_t config) { }).handle_error( mkfs_ertr::pass_further{}, crimson::ct_error::assert_all{ - "Invalid error write_rbm_header in RBMDevice::mkfs" + "Invalid error write_rbm_header in RBMDevice::do_primary_mkfs" }); }); }); @@ -129,7 +147,7 @@ read_ertr::future RBMDevice::read_rbm_header( }); } -RBMDevice::mount_ret RBMDevice::do_mount() +RBMDevice::mount_ret RBMDevice::do_shard_mount() { return open(get_device_path(), seastar::open_flags::rw | seastar::open_flags::dsync @@ -138,25 +156,30 @@ RBMDevice::mount_ret RBMDevice::do_mount() ).handle_error( mount_ertr::pass_further{}, crimson::ct_error::assert_all{ - "Invalid error stat_device in RBMDevice::mount"} + "Invalid error stat_device in RBMDevice::do_shard_mount"} ).safe_then([this](auto st) { + assert(st.block_size > 0); super.block_size = st.block_size; return read_rbm_header(RBM_START_ADDRESS - ).safe_then([](auto s) { + ).safe_then([this](auto s) { + LOG_PREFIX(RBMDevice::do_shard_mount); + shard_info = s.shard_infos[seastar::this_shard_id()]; + INFO("{} read {}", device_id_printer_t{get_device_id()}, shard_info); + s.validate(); return seastar::now(); }); }); }).handle_error( mount_ertr::pass_further{}, crimson::ct_error::assert_all{ - "Invalid error mount in NVMeBlockDevice::mount"} + "Invalid error mount in RBMDevice::do_shard_mount"} ); } EphemeralRBMDeviceRef create_test_ephemeral(uint64_t journal_size, uint64_t data_size) { return EphemeralRBMDeviceRef( new EphemeralRBMDevice(journal_size + data_size + - random_block_device::RBMDevice::get_journal_start(), + random_block_device::RBMDevice::get_shard_reserved_size(), EphemeralRBMDevice::TEST_BLOCK_SIZE)); } @@ -236,5 +259,13 @@ write_ertr::future<> EphemeralRBMDevice::writev( return write_ertr::now(); } +EphemeralRBMDevice::mount_ret EphemeralRBMDevice::mount() { + return do_shard_mount(); +} + 
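// --- Illustrative sketch (not part of the patch above) ---------------------
// A minimal, self-contained C++ sketch of the per-shard partitioning that
// RBMDevice::do_primary_mkfs performs: the device capacity is divided into
// shard_num block-aligned regions, each shard owning
// [start_offset, start_offset + size) and hosting its own journal.
// shard_region_t and split_into_shards are names invented for this sketch,
// not upstream identifiers; shard_region_t stands in for rbm_shard_info_t.
#include <cassert>
#include <cstdint>
#include <vector>

struct shard_region_t {        // stand-in for rbm_shard_info_t
  uint64_t size = 0;
  uint64_t start_offset = 0;
};

// Split a device of dev_size bytes into shard_num regions, each rounded down
// to a block boundary, mirroring the loop in do_primary_mkfs. Every region
// must still be large enough to hold the per-shard journal.
inline std::vector<shard_region_t> split_into_shards(
  uint64_t dev_size, uint64_t block_size,
  unsigned shard_num, uint64_t journal_size)
{
  assert(shard_num > 0 && block_size > 0);
  std::vector<shard_region_t> regions(shard_num);
  // per-shard share, rounded down to a multiple of the block size
  uint64_t aligned = (dev_size / shard_num)
                   - ((dev_size / shard_num) % block_size);
  for (unsigned i = 0; i < shard_num; ++i) {
    regions[i].size = aligned;
    regions[i].start_offset = static_cast<uint64_t>(i) * aligned;
    assert(regions[i].size > journal_size);
  }
  return regions;
}
// Typical use in the spirit of the patch: one region per reactor shard,
// e.g. split_into_shards(dev_size, block_size, smp_count, journal_size),
// where smp_count corresponds to seastar::smp::count in the code above.
// --- end of sketch ----------------------------------------------------------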
+EphemeralRBMDevice::mkfs_ret EphemeralRBMDevice::mkfs(device_config_t config) { + return do_primary_mkfs(config, 1, DEFAULT_TEST_CBJOURNAL_SIZE); +} + } diff --git a/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.h b/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.h index 7f30b197f..501d9f913 100644 --- a/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.h +++ b/ceph/src/crimson/os/seastore/random_block_manager/rbm_device.h @@ -84,6 +84,7 @@ public: } protected: rbm_metadata_header_t super; + rbm_shard_info_t shard_info; public: RBMDevice() {} virtual ~RBMDevice() = default; @@ -152,8 +153,13 @@ public: mkfs_ret do_mkfs(device_config_t); + // shard 0 mkfs + mkfs_ret do_primary_mkfs(device_config_t, int shard_num, size_t journal_size); + mount_ret do_mount(); + mount_ret do_shard_mount(); + write_ertr::future<> write_rbm_header(); read_ertr::future read_rbm_header(rbm_abs_addr addr); @@ -168,9 +174,21 @@ public: return super.journal_size; } - static rbm_abs_addr get_journal_start() { + static rbm_abs_addr get_shard_reserved_size() { return RBM_SUPERBLOCK_SIZE; } + + rbm_abs_addr get_shard_journal_start() { + return shard_info.start_offset + get_shard_reserved_size(); + } + + uint64_t get_shard_start() const { + return shard_info.start_offset; + } + + uint64_t get_shard_end() const { + return shard_info.start_offset + shard_info.size; + } }; using RBMDeviceRef = std::unique_ptr; @@ -195,14 +213,8 @@ public: std::size_t get_available_size() const final { return size; } extent_len_t get_block_size() const final { return block_size; } - mount_ret mount() final { - return do_mount(); - } - - mkfs_ret mkfs(device_config_t config) final { - super.journal_size = DEFAULT_TEST_CBJOURNAL_SIZE; - return do_mkfs(config); - } + mount_ret mount() final; + mkfs_ret mkfs(device_config_t config) final; open_ertr::future<> open( const std::string &in_path, diff --git a/ceph/src/crimson/os/seastore/record_scanner.cc b/ceph/src/crimson/os/seastore/record_scanner.cc new file mode 100644 index 000000000..9778bbb77 --- /dev/null +++ b/ceph/src/crimson/os/seastore/record_scanner.cc @@ -0,0 +1,239 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- +// vim: ts=8 sw=2 smarttab expandtab + +#include "crimson/os/seastore/record_scanner.h" + +#include "crimson/os/seastore/logging.h" + +SET_SUBSYS(seastore_journal); + +namespace crimson::os::seastore { + +RecordScanner::scan_valid_records_ret +RecordScanner::scan_valid_records( + scan_valid_records_cursor &cursor, + segment_nonce_t nonce, + size_t budget, + found_record_handler_t &handler) +{ + LOG_PREFIX(RecordScanner::scan_valid_records); + initialize_cursor(cursor); + DEBUG("starting at {}, budget={}", cursor, budget); + auto retref = std::make_unique(0); + auto &budget_used = *retref; + return crimson::repeat( + [=, &cursor, &budget_used, &handler, this]() mutable + -> scan_valid_records_ertr::future { + return [=, &handler, &cursor, &budget_used, this] { + if (!cursor.last_valid_header_found) { + return read_validate_record_metadata(cursor, nonce + ).safe_then([=, &cursor](auto md) { + if (!md) { + cursor.last_valid_header_found = true; + if (cursor.is_complete()) { + INFO("complete at {}, invalid record group metadata", + cursor); + } else { + DEBUG("found invalid record group metadata at {}, " + "processing {} pending record groups", + cursor.seq, + cursor.pending_record_groups.size()); + } + return scan_valid_records_ertr::now(); + } else { + auto& [header, md_bl] = *md; + DEBUG("found valid {} at 
{}", header, cursor.seq); + cursor.emplace_record_group(header, std::move(md_bl)); + return scan_valid_records_ertr::now(); + } + }).safe_then([=, &cursor, &budget_used, &handler, this] { + DEBUG("processing committed record groups until {}, {} pending", + cursor.last_committed, + cursor.pending_record_groups.size()); + return crimson::repeat( + [=, &budget_used, &cursor, &handler, this] { + if (cursor.pending_record_groups.empty()) { + /* This is only possible if the segment is empty. + * A record's last_commited must be prior to its own + * location since it itself cannot yet have been committed + * at its own time of submission. Thus, the most recently + * read record must always fall after cursor.last_committed */ + return scan_valid_records_ertr::make_ready_future< + seastar::stop_iteration>(seastar::stop_iteration::yes); + } + auto &next = cursor.pending_record_groups.front(); + journal_seq_t next_seq = {cursor.seq.segment_seq, next.offset}; + if (cursor.last_committed == JOURNAL_SEQ_NULL || + next_seq > cursor.last_committed) { + return scan_valid_records_ertr::make_ready_future< + seastar::stop_iteration>(seastar::stop_iteration::yes); + } + return consume_next_records(cursor, handler, budget_used + ).safe_then([] { + return scan_valid_records_ertr::make_ready_future< + seastar::stop_iteration>(seastar::stop_iteration::no); + }); + }); + }); + } else { + assert(!cursor.pending_record_groups.empty()); + auto &next = cursor.pending_record_groups.front(); + return read_validate_data(next.offset, next.header + ).safe_then([this, FNAME, &budget_used, &cursor, &handler, &next](auto valid) { + if (!valid) { + INFO("complete at {}, invalid record group data at {}, {}", + cursor, next.offset, next.header); + cursor.pending_record_groups.clear(); + return scan_valid_records_ertr::now(); + } + return consume_next_records(cursor, handler, budget_used); + }); + } + }().safe_then([=, &budget_used, &cursor] { + if (cursor.is_complete() || budget_used >= budget) { + DEBUG("finish at {}, budget_used={}, budget={}", + cursor, budget_used, budget); + return seastar::stop_iteration::yes; + } else { + return seastar::stop_iteration::no; + } + }); + }).safe_then([retref=std::move(retref)]() mutable -> scan_valid_records_ret { + return scan_valid_records_ret( + scan_valid_records_ertr::ready_future_marker{}, + std::move(*retref)); + }); +} + +RecordScanner::read_validate_record_metadata_ret +RecordScanner::read_validate_record_metadata( + scan_valid_records_cursor &cursor, + segment_nonce_t nonce) +{ + LOG_PREFIX(RecordScanner::read_validate_record_metadata); + paddr_t start = cursor.seq.offset; + auto block_size = cursor.get_block_size(); + if (get_segment_off(cursor.seq.offset) + block_size > get_segment_end_offset(cursor.seq.offset)) { + DEBUG("failed -- record group header block {}~4096 > segment_size {}", + start, get_segment_end_offset(cursor.seq.offset)); + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::nullopt); + } + TRACE("reading record group header block {}~4096", start); + return read(start, block_size + ).safe_then([=](bufferptr bptr) mutable + -> read_validate_record_metadata_ret { + bufferlist bl; + bl.append(bptr); + auto maybe_header = try_decode_records_header(bl, nonce); + if (!maybe_header.has_value()) { + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::nullopt); + } + + auto& header = *maybe_header; + if (header.mdlength < block_size || + header.mdlength % 
block_size != 0 || + header.dlength % block_size != 0 || + (header.committed_to != JOURNAL_SEQ_NULL && + get_segment_off(header.committed_to.offset) % + cursor.get_block_size() != 0) || + (get_segment_off(cursor.seq.offset) + header.mdlength + header.dlength > + get_segment_end_offset(cursor.seq.offset))) { + ERROR("failed, invalid record group header {}", header); + return crimson::ct_error::input_output_error::make(); + } + + if (is_record_segment_seq_invalid(cursor, header)) { + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::nullopt); + } + + if (header.mdlength == block_size) { + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::make_pair(std::move(header), std::move(bl)) + ); + } + + paddr_t rest_start = cursor.seq.offset.add_offset(block_size); + auto rest_len = header.mdlength - block_size; + TRACE("reading record group header rest {}~{}", rest_start, rest_len); + return read(rest_start, rest_len + ).safe_then([header=std::move(header), bl=std::move(bl) + ](auto&& bptail) mutable { + bl.push_back(bptail); + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::make_pair(std::move(header), std::move(bl))); + }); + }).safe_then([](auto p) { + if (p && validate_records_metadata(p->second)) { + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::move(*p) + ); + } else { + return read_validate_record_metadata_ret( + read_validate_record_metadata_ertr::ready_future_marker{}, + std::nullopt); + } + }); + +} + +RecordScanner::read_validate_data_ret RecordScanner::read_validate_data( + paddr_t record_base, + const record_group_header_t &header) +{ + LOG_PREFIX(RecordScanner::read_validate_data); + auto data_addr = record_base.add_offset(header.mdlength); + TRACE("reading record group data blocks {}~{}", data_addr, header.dlength); + return read( + data_addr, + header.dlength + ).safe_then([=, &header](auto bptr) { + bufferlist bl; + bl.append(bptr); + return validate_records_data(header, bl); + }); +} + +RecordScanner::consume_record_group_ertr::future<> +RecordScanner::consume_next_records( + scan_valid_records_cursor& cursor, + found_record_handler_t& handler, + std::size_t& budget_used) +{ + LOG_PREFIX(RecordScanner::consume_next_records); + auto& next = cursor.pending_record_groups.front(); + auto total_length = next.header.dlength + next.header.mdlength; + budget_used += total_length; + auto locator = record_locator_t{ + next.offset.add_offset(next.header.mdlength), + write_result_t{ + journal_seq_t{ + cursor.seq.segment_seq, + next.offset + }, + total_length + } + }; + DEBUG("processing {} at {}, budget_used={}", + next.header, locator, budget_used); + return handler( + locator, + next.header, + next.mdbuffer + ).safe_then([FNAME, &cursor] { + cursor.pop_record_group(); + if (cursor.is_complete()) { + INFO("complete at {}, no more record group", cursor); + } + }); +} + +} diff --git a/ceph/src/crimson/os/seastore/record_scanner.h b/ceph/src/crimson/os/seastore/record_scanner.h new file mode 100644 index 000000000..2cbc7c562 --- /dev/null +++ b/ceph/src/crimson/os/seastore/record_scanner.h @@ -0,0 +1,83 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- +// vim: ts=8 sw=2 smarttab expandtab + +#pragma once + +#include "crimson/common/errorator.h" +#include "crimson/os/seastore/seastore_types.h" +#include 
"crimson/os/seastore/segment_manager.h" + + +namespace crimson::os::seastore { + +class RecordScanner { +public: + using read_ertr = SegmentManager::read_ertr; + using scan_valid_records_ertr = read_ertr; + using scan_valid_records_ret = scan_valid_records_ertr::future< + size_t>; + using found_record_handler_t = std::function< + scan_valid_records_ertr::future<>( + record_locator_t record_locator, + // callee may assume header and bl will remain valid until + // returned future resolves + const record_group_header_t &header, + const bufferlist &mdbuf)>; + scan_valid_records_ret scan_valid_records( + scan_valid_records_cursor &cursor, ///< [in, out] cursor, updated during call + segment_nonce_t nonce, ///< [in] nonce for segment + size_t budget, ///< [in] max budget to use + found_record_handler_t &handler ///< [in] handler for records + ); ///< @return used budget + + device_off_t get_segment_off(paddr_t addr) const { + if (addr.get_addr_type() == paddr_types_t::SEGMENT) { + auto& seg_addr = addr.as_seg_paddr(); + return seg_addr.get_segment_off(); + } + assert(addr.get_addr_type() == paddr_types_t::RANDOM_BLOCK); + auto& blk_addr = addr.as_blk_paddr(); + return blk_addr.get_device_off(); + } + +protected: + /// read record metadata for record starting at start + using read_validate_record_metadata_ertr = read_ertr; + using read_validate_record_metadata_ret = + read_validate_record_metadata_ertr::future< + std::optional> + >; + read_validate_record_metadata_ret read_validate_record_metadata( + scan_valid_records_cursor &cursor, + segment_nonce_t nonce); + + /// read and validate data + using read_validate_data_ertr = read_ertr; + using read_validate_data_ret = read_validate_data_ertr::future; + read_validate_data_ret read_validate_data( + paddr_t record_base, + const record_group_header_t &header ///< caller must ensure lifetime through + /// future resolution + ); + + virtual bool is_record_segment_seq_invalid(scan_valid_records_cursor &cursor, + record_group_header_t &h) = 0; + + virtual int64_t get_segment_end_offset(paddr_t addr) = 0; + + using read_ret = read_ertr::future; + virtual read_ret read(paddr_t start, size_t len) = 0; + + using consume_record_group_ertr = scan_valid_records_ertr; + consume_record_group_ertr::future<> consume_next_records( + scan_valid_records_cursor& cursor, + found_record_handler_t& handler, + std::size_t& budget_used); + + virtual void initialize_cursor(scan_valid_records_cursor &cursor) = 0; + + virtual ~RecordScanner() {} + +}; + +} diff --git a/ceph/src/crimson/os/seastore/root_block.h b/ceph/src/crimson/os/seastore/root_block.h index bf3dfb542..0e45519ce 100644 --- a/ceph/src/crimson/os/seastore/root_block.h +++ b/ceph/src/crimson/os/seastore/root_block.h @@ -41,7 +41,7 @@ struct RootBlock : CachedExtent { CachedExtent* lba_root_node = nullptr; CachedExtent* backref_root_node = nullptr; - RootBlock() : CachedExtent(0) {} + RootBlock() : CachedExtent(zero_length_t()) {}; RootBlock(const RootBlock &rhs) : CachedExtent(rhs), diff --git a/ceph/src/crimson/os/seastore/seastore.cc b/ceph/src/crimson/os/seastore/seastore.cc index b44d66967..897a063e0 100644 --- a/ceph/src/crimson/os/seastore/seastore.cc +++ b/ceph/src/crimson/os/seastore/seastore.cc @@ -958,7 +958,7 @@ seastar::future SeaStore::Shard::stat( Transaction::src_t::READ, "stat", op_type_t::STAT, - [=, this, &oid](auto &t, auto &onode) { + [=, this](auto &t, auto &onode) { struct stat st; auto &olayout = onode.get_layout(); st.st_size = olayout.size; @@ -1196,8 +1196,20 @@ seastar::future<> 
SeaStore::Shard::do_transaction_no_callbacks( op_type_t::TRANSACTION, [this](auto &ctx) { return with_trans_intr(*ctx.transaction, [&, this](auto &t) { - return seastar::do_with(std::vector(ctx.iter.objects.size()), - std::vector(), +#ifndef NDEBUG + LOG_PREFIX(SeaStore::Shard::do_transaction_no_callbacks); + TRACET(" transaction dump:\n", t); + JSONFormatter f(true); + f.open_object_section("transaction"); + ctx.ext_transaction.dump(&f); + f.close_section(); + std::stringstream str; + f.flush(str); + TRACET("{}", t, str.str()); +#endif + return seastar::do_with( + std::vector(ctx.iter.objects.size()), + std::vector(ctx.iter.objects.size()), [this, &ctx](auto& onodes, auto& d_onodes) mutable { return trans_intr::repeat( [this, &ctx, &onodes, &d_onodes]() mutable @@ -1289,20 +1301,42 @@ SeaStore::Shard::_do_transaction_step( *ctx.transaction, i.get_oid(op->oid)); } } - return fut.si_then([&, op, this](auto&& get_onode) -> tm_ret { - LOG_PREFIX(SeaStore::_do_transaction_step); + return fut.si_then([&, op](auto get_onode) { OnodeRef &o = onodes[op->oid]; if (!o) { assert(get_onode); o = get_onode; - d_onodes.push_back(get_onode); + d_onodes[op->oid] = get_onode; } + if (op->op == Transaction::OP_CLONE && !d_onodes[op->dest_oid]) { + //TODO: use when_all_succeed after making onode tree + // support parallel extents loading + return onode_manager->get_or_create_onode( + *ctx.transaction, i.get_oid(op->dest_oid) + ).si_then([&, op](auto dest_onode) { + assert(dest_onode); + auto &d_o = onodes[op->dest_oid]; + assert(!d_o); + assert(!d_onodes[op->dest_oid]); + d_o = dest_onode; + d_onodes[op->dest_oid] = dest_onode; + return seastar::now(); + }); + } else { + return OnodeManager::get_or_create_onode_iertr::now(); + } + }).si_then([&, op, this]() -> tm_ret { + LOG_PREFIX(SeaStore::_do_transaction_step); try { switch (op->op) { case Transaction::OP_REMOVE: { TRACET("removing {}", *ctx.transaction, i.get_oid(op->oid)); - return _remove(ctx, onodes[op->oid]); + return _remove(ctx, onodes[op->oid] + ).si_then([&onodes, &d_onodes, op] { + onodes[op->oid].reset(); + d_onodes[op->oid].reset(); + }); } case Transaction::OP_CREATE: case Transaction::OP_TOUCH: @@ -1390,6 +1424,14 @@ SeaStore::Shard::_do_transaction_step( // TODO return tm_iertr::now(); } + case Transaction::OP_CLONE: + { + TRACET("cloning {} to {}", + *ctx.transaction, + i.get_oid(op->oid), + i.get_oid(op->dest_oid)); + return _clone(ctx, onodes[op->oid], d_onodes[op->dest_oid]); + } default: ERROR("bad op {}", static_cast(op->op)); return crimson::ct_error::input_output_error::make(); @@ -1507,6 +1549,31 @@ SeaStore::Shard::_write( }); } +SeaStore::Shard::tm_ret +SeaStore::Shard::_clone( + internal_context_t &ctx, + OnodeRef &onode, + OnodeRef &d_onode) +{ + LOG_PREFIX(SeaStore::_clone); + DEBUGT("onode={} d_onode={}", *ctx.transaction, *onode, *d_onode); + return seastar::do_with( + ObjectDataHandler(max_object_size), + [this, &ctx, &onode, &d_onode](auto &objHandler) { + //TODO: currently, we only care about object data, leaving cloning + // of xattr/omap for future work + auto &object_size = onode->get_layout().size; + auto &d_object_size = d_onode->get_mutable_layout(*ctx.transaction).size; + d_object_size = object_size; + return objHandler.clone( + ObjectDataHandler::context_t{ + *transaction_manager, + *ctx.transaction, + *onode, + d_onode.get()}); + }); +} + SeaStore::Shard::tm_ret SeaStore::Shard::_zero( internal_context_t &ctx, diff --git a/ceph/src/crimson/os/seastore/seastore.h b/ceph/src/crimson/os/seastore/seastore.h index 
df4323df5..876fadca8 100644 --- a/ceph/src/crimson/os/seastore/seastore.h +++ b/ceph/src/crimson/os/seastore/seastore.h @@ -353,6 +353,10 @@ public: uint64_t offset, size_t len, ceph::bufferlist &&bl, uint32_t fadvise_flags); + tm_ret _clone( + internal_context_t &ctx, + OnodeRef &onode, + OnodeRef &d_onode); tm_ret _zero( internal_context_t &ctx, OnodeRef &onode, diff --git a/ceph/src/crimson/os/seastore/seastore_types.cc b/ceph/src/crimson/os/seastore/seastore_types.cc index 9328a0309..0acfdb74e 100644 --- a/ceph/src/crimson/os/seastore/seastore_types.cc +++ b/ceph/src/crimson/os/seastore/seastore_types.cc @@ -89,6 +89,15 @@ std::ostream& operator<<(std::ostream& out, segment_seq_printer_t seq) } } +std::ostream &operator<<(std::ostream &out, const pladdr_t &pladdr) +{ + if (pladdr.is_laddr()) { + return out << pladdr.get_laddr(); + } else { + return out << pladdr.get_paddr(); + } +} + std::ostream &operator<<(std::ostream &out, const paddr_t &rhs) { auto id = rhs.get_device_id(); @@ -779,8 +788,8 @@ device_type_t string_to_device_type(std::string type) { if (type == "SSD") { return device_type_t::SSD; } - if (type == "ZNS") { - return device_type_t::ZNS; + if (type == "ZBD") { + return device_type_t::ZBD; } if (type == "RANDOM_BLOCK_SSD") { return device_type_t::RANDOM_BLOCK_SSD; @@ -797,8 +806,8 @@ std::ostream& operator<<(std::ostream& out, device_type_t t) return out << "HDD"; case device_type_t::SSD: return out << "SSD"; - case device_type_t::ZNS: - return out << "ZNS"; + case device_type_t::ZBD: + return out << "ZBD"; case device_type_t::EPHEMERAL_COLD: return out << "EPHEMERAL_COLD"; case device_type_t::EPHEMERAL_MAIN: diff --git a/ceph/src/crimson/os/seastore/seastore_types.h b/ceph/src/crimson/os/seastore/seastore_types.h index 55d8eb4a2..0b4ad8536 100644 --- a/ceph/src/crimson/os/seastore/seastore_types.h +++ b/ceph/src/crimson/os/seastore/seastore_types.h @@ -210,7 +210,7 @@ constexpr segment_id_t NULL_SEG_ID = MAX_SEG_ID; /* Monotonically increasing segment seq, uniquely identifies * the incarnation of a segment */ -using segment_seq_t = uint32_t; +using segment_seq_t = uint64_t; static constexpr segment_seq_t MAX_SEG_SEQ = std::numeric_limits::max(); static constexpr segment_seq_t NULL_SEG_SEQ = MAX_SEG_SEQ; @@ -488,6 +488,7 @@ constexpr device_off_t decode_device_off(internal_paddr_t addr) { struct seg_paddr_t; struct blk_paddr_t; struct res_paddr_t; +struct pladdr_t; struct paddr_t { public: // P_ADDR_MAX == P_ADDR_NULL == paddr_t{} @@ -668,6 +669,8 @@ private: static_cast(offset)) {} friend struct paddr_le_t; + friend struct pladdr_le_t; + }; std::ostream &operator<<(std::ostream &out, const paddr_t &rhs); @@ -882,7 +885,7 @@ enum class device_type_t : uint8_t { NONE = 0, HDD, SSD, - ZNS, + ZBD, // ZNS SSD or SMR HDD EPHEMERAL_COLD, EPHEMERAL_MAIN, RANDOM_BLOCK_SSD, @@ -896,7 +899,7 @@ bool can_delay_allocation(device_type_t type); device_type_t string_to_device_type(std::string type); enum class backend_type_t { - SEGMENTED, // SegmentManager: SSD, ZNS, HDD + SEGMENTED, // SegmentManager: SSD, ZBD, HDD RANDOM_BLOCK // RBMDevice: RANDOM_BLOCK_SSD }; @@ -1032,6 +1035,103 @@ struct __attribute((packed)) laddr_le_t { } }; +constexpr uint64_t PL_ADDR_NULL = std::numeric_limits::max(); + +struct pladdr_t { + std::variant pladdr; + + pladdr_t() = default; + pladdr_t(const pladdr_t &) = default; + pladdr_t(laddr_t laddr) + : pladdr(laddr) {} + pladdr_t(paddr_t paddr) + : pladdr(paddr) {} + + bool is_laddr() const { + return pladdr.index() == 0; + } + + bool is_paddr() const { + 
return pladdr.index() == 1; + } + + pladdr_t& operator=(paddr_t paddr) { + pladdr = paddr; + return *this; + } + + pladdr_t& operator=(laddr_t laddr) { + pladdr = laddr; + return *this; + } + + bool operator==(const pladdr_t &) const = default; + + paddr_t get_paddr() const { + assert(pladdr.index() == 1); + return paddr_t(std::get<1>(pladdr)); + } + + laddr_t get_laddr() const { + assert(pladdr.index() == 0); + return laddr_t(std::get<0>(pladdr)); + } + +}; + +std::ostream &operator<<(std::ostream &out, const pladdr_t &pladdr); + +enum class addr_type_t : uint8_t { + PADDR=0, + LADDR=1, + MAX=2 // or NONE +}; + +struct __attribute((packed)) pladdr_le_t { + ceph_le64 pladdr = ceph_le64(PL_ADDR_NULL); + addr_type_t addr_type = addr_type_t::MAX; + + pladdr_le_t() = default; + pladdr_le_t(const pladdr_le_t &) = default; + explicit pladdr_le_t(const pladdr_t &addr) + : pladdr( + ceph_le64( + addr.is_laddr() ? + std::get<0>(addr.pladdr) : + std::get<1>(addr.pladdr).internal_paddr)), + addr_type( + addr.is_laddr() ? + addr_type_t::LADDR : + addr_type_t::PADDR) + {} + + operator pladdr_t() const { + if (addr_type == addr_type_t::LADDR) { + return pladdr_t(laddr_t(pladdr)); + } else { + assert(addr_type == addr_type_t::PADDR); + return pladdr_t(paddr_t(pladdr)); + } + } +}; + +template +struct min_max_t {}; + +template <> +struct min_max_t { + static constexpr laddr_t max = L_ADDR_MAX; + static constexpr laddr_t min = L_ADDR_MIN; + static constexpr laddr_t null = L_ADDR_NULL; +}; + +template <> +struct min_max_t { + static constexpr paddr_t max = P_ADDR_MAX; + static constexpr paddr_t min = P_ADDR_MIN; + static constexpr paddr_t null = P_ADDR_NULL; +}; + // logical offset, see LBAManager, TransactionManager using extent_len_t = uint32_t; constexpr extent_len_t EXTENT_LEN_MAX = @@ -2056,6 +2156,7 @@ struct scan_valid_records_cursor { journal_seq_t seq; journal_seq_t last_committed; std::size_t num_consumed_records = 0; + extent_len_t block_size = 0; struct found_record_group_t { paddr_t offset; @@ -2082,10 +2183,12 @@ struct scan_valid_records_cursor { return seq.offset.as_seg_paddr().get_segment_off(); } + extent_len_t get_block_size() const { + return block_size; + } + void increment_seq(segment_off_t off) { - auto& seg_addr = seq.offset.as_seg_paddr(); - seg_addr.set_segment_off( - seg_addr.get_segment_off() + off); + seq.offset = seq.offset.add_offset(off); } void emplace_record_group(const record_group_header_t&, ceph::bufferlist&&); @@ -2129,6 +2232,7 @@ template <> struct fmt::formatter : fmt::os template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; template <> struct fmt::formatter : fmt::ostream_formatter {}; diff --git a/ceph/src/crimson/os/seastore/segment_manager.cc b/ceph/src/crimson/os/seastore/segment_manager.cc index 098a9b068..1be9cce5f 100644 --- a/ceph/src/crimson/os/seastore/segment_manager.cc +++ b/ceph/src/crimson/os/seastore/segment_manager.cc @@ -6,7 +6,7 @@ #include "crimson/os/seastore/logging.h" #ifdef HAVE_ZNS -#include "crimson/os/seastore/segment_manager/zns.h" +#include "crimson/os/seastore/segment_manager/zbd.h" SET_SUBSYS(seastore_device); #endif @@ -63,23 +63,31 @@ SegmentManager::get_segment_manager( 
LOG_PREFIX(SegmentManager::get_segment_manager); return seastar::do_with( static_cast(0), - [&](auto &nr_zones) { + [FNAME, + dtype, + device](auto &nr_zones) { return seastar::open_file_dma( device + "/block", seastar::open_flags::rw - ).then([&](auto file) { + ).then([FNAME, + dtype, + device, + &nr_zones](auto file) { return seastar::do_with( file, - [=, &nr_zones](auto &f) -> seastar::future { + [&nr_zones](auto &f) -> seastar::future { ceph_assert(f); return f.ioctl(BLKGETNRZONES, (void *)&nr_zones); }); - }).then([&](auto ret) -> crimson::os::seastore::SegmentManagerRef { + }).then([FNAME, + dtype, + device, + &nr_zones](auto ret) -> crimson::os::seastore::SegmentManagerRef { crimson::os::seastore::SegmentManagerRef sm; INFO("Found {} zones.", nr_zones); if (nr_zones != 0) { return std::make_unique< - segment_manager::zns::ZNSSegmentManager + segment_manager::zbd::ZBDSegmentManager >(device + "/block"); } else { return std::make_unique< diff --git a/ceph/src/crimson/os/seastore/segment_manager.h b/ceph/src/crimson/os/seastore/segment_manager.h index 1669d124a..719fa6075 100644 --- a/ceph/src/crimson/os/seastore/segment_manager.h +++ b/ceph/src/crimson/os/seastore/segment_manager.h @@ -153,7 +153,7 @@ public: * advance_wp * * advance the segment write pointer, - * needed when writing at wp is strictly implemented. ex: ZNS backed segments + * needed when writing at wp is strictly implemented. ex: ZBD backed segments * @param offset: advance write pointer till the given offset */ virtual write_ertr::future<> advance_wp( diff --git a/ceph/src/crimson/os/seastore/segment_manager/zns.cc b/ceph/src/crimson/os/seastore/segment_manager/zbd.cc similarity index 71% rename from ceph/src/crimson/os/seastore/segment_manager/zns.cc rename to ceph/src/crimson/os/seastore/segment_manager/zbd.cc index deaaadf66..88521a947 100644 --- a/ceph/src/crimson/os/seastore/segment_manager/zns.cc +++ b/ceph/src/crimson/os/seastore/segment_manager/zbd.cc @@ -6,9 +6,10 @@ #include #include -#include "crimson/os/seastore/segment_manager/zns.h" +#include "crimson/os/seastore/segment_manager/zbd.h" #include "crimson/common/config_proxy.h" #include "crimson/os/seastore/logging.h" +#include "crimson/common/errorator-loop.h" #include "include/buffer.h" SET_SUBSYS(seastore_device); @@ -16,9 +17,9 @@ SET_SUBSYS(seastore_device); #define SECT_SHIFT 9 #define RESERVED_ZONES 1 // limit the max padding buf size to 1MB -#define MAX_PADDING_SIZE 1048576 +#define MAX_PADDING_SIZE 4194304 -using z_op = crimson::os::seastore::segment_manager::zns::zone_op; +using z_op = crimson::os::seastore::segment_manager::zbd::zone_op; template <> struct fmt::formatter: fmt::formatter { template auto format(z_op s, FormatContext& ctx) { @@ -42,15 +43,15 @@ template <> struct fmt::formatter: fmt::formatter { } }; -namespace crimson::os::seastore::segment_manager::zns { +namespace crimson::os::seastore::segment_manager::zbd { -using open_device_ret = ZNSSegmentManager::access_ertr::future< +using open_device_ret = ZBDSegmentManager::access_ertr::future< std::pair>; static open_device_ret open_device( const std::string &path, seastar::open_flags mode) { - LOG_PREFIX(ZNSSegmentManager::open_device); + LOG_PREFIX(ZBDSegmentManager::open_device); return seastar::file_stat( path, seastar::follow_symlink::yes ).then([FNAME, mode, &path](auto stat) mutable { @@ -69,41 +70,41 @@ static open_device_ret open_device( ); } -static zns_sm_metadata_t make_metadata( +static zbd_sm_metadata_t make_metadata( uint64_t total_size, seastore_meta_t meta, const 
seastar::stat_data &data, size_t zone_size_sectors, size_t zone_capacity_sectors, + size_t nr_cnv_zones, size_t num_zones) { - LOG_PREFIX(ZNSSegmentManager::make_metadata); + LOG_PREFIX(ZBDSegmentManager::make_metadata); + + // Using only SWR zones in a SMR drive, for now + auto skipped_zones = RESERVED_ZONES + nr_cnv_zones; + assert(num_zones > skipped_zones); // TODO: support Option::size_t seastore_segment_size // to allow zones_per_segment > 1 with striping. size_t zone_size = zone_size_sectors << SECT_SHIFT; + assert(total_size == num_zones * zone_size); size_t zone_capacity = zone_capacity_sectors << SECT_SHIFT; size_t segment_size = zone_size; size_t zones_per_segment = segment_size / zone_size; - size_t segments = (num_zones - RESERVED_ZONES) / zones_per_segment; + size_t segments = (num_zones - skipped_zones) / zones_per_segment; size_t per_shard_segments = segments / seastar::smp::count; size_t available_size = zone_capacity * segments; size_t per_shard_available_size = zone_capacity * per_shard_segments; - std::vector shard_infos(seastar::smp::count); - for (unsigned int i = 0; i < seastar::smp::count; i++) { - shard_infos[i].size = per_shard_available_size; - shard_infos[i].segments = per_shard_segments; - shard_infos[i].first_segment_offset = zone_size * RESERVED_ZONES - + i * segment_size* per_shard_segments; - } - assert(total_size == num_zones * zone_size); WARN("Ignoring configuration values for device and segment size"); INFO( - "device size {}, available_size {}, block_size {}, allocated_size {}," - " total zones {}, zone_size {}, zone_capacity {}," - " total segments {}, zones per segment {}, segment size {}", + "device size: {}, available size: {}, block size: {}, allocated size: {}," + " total zones {}, zone size: {}, zone capacity: {}," + " total segments: {}, zones per segment: {}, segment size: {}" + " conv zones: {}, swr zones: {}, per shard segments: {}" + " per shard available size: {}", total_size, available_size, data.block_size, @@ -113,9 +114,23 @@ static zns_sm_metadata_t make_metadata( zone_capacity, segments, zones_per_segment, - zone_capacity * zones_per_segment); + zone_capacity * zones_per_segment, + nr_cnv_zones, + num_zones - nr_cnv_zones, + per_shard_segments, + per_shard_available_size); + + std::vector shard_infos(seastar::smp::count); + for (unsigned int i = 0; i < seastar::smp::count; i++) { + shard_infos[i].size = per_shard_available_size; + shard_infos[i].segments = per_shard_segments; + shard_infos[i].first_segment_offset = zone_size * skipped_zones + + i * segment_size * per_shard_segments; + INFO("First segment offset for shard {} is: {}", + i, shard_infos[i].first_segment_offset); + } - zns_sm_metadata_t ret = zns_sm_metadata_t{ + zbd_sm_metadata_t ret = zbd_sm_metadata_t{ seastar::smp::count, segment_size, zone_capacity * zones_per_segment, @@ -200,12 +215,39 @@ static seastar::future get_zone_capacity( ); } +// get the number of conventional zones of SMR HDD, +// they are randomly writable and don't respond to zone operations +static seastar::future get_nr_cnv_zones( + seastar::file &device, + uint32_t nr_zones) +{ + return seastar::do_with( + ZoneReport(nr_zones), + [&](auto &zr) { + zr.hdr->sector = 0; + zr.hdr->nr_zones = nr_zones; + return device.ioctl( + BLKREPORTZONE, + zr.hdr + ).then([&, nr_zones](int ret) { + size_t cnv_zones = 0; + for (uint32_t i = 0; i < nr_zones; i++) { + if (zr.hdr->zones[i].type == BLK_ZONE_TYPE_CONVENTIONAL) + cnv_zones++; + } + return seastar::make_ready_future(cnv_zones); + }); + } + ); +} + + static 
write_ertr::future<> do_write( seastar::file &device, uint64_t offset, bufferptr &bptr) { - LOG_PREFIX(ZNSSegmentManager::do_write); + LOG_PREFIX(ZBDSegmentManager::do_write); DEBUG("offset {} len {}", offset, bptr.length()); @@ -228,48 +270,65 @@ static write_ertr::future<> do_write( } static write_ertr::future<> do_writev( + device_id_t device_id, seastar::file &device, uint64_t offset, bufferlist&& bl, size_t block_size) { - LOG_PREFIX(ZNSSegmentManager::do_writev); - DEBUG("offset {} len {}", - offset, - bl.length()); + LOG_PREFIX(ZBDSegmentManager::do_writev); + DEBUG("{} offset {} len {}", + device_id_printer_t{device_id}, offset, bl.length()); // writev requires each buffer to be aligned to the disks' block // size, we need to rebuild here bl.rebuild_aligned(block_size); - std::vector iov; - bl.prepare_iov(&iov); - return device.dma_write( - offset, - std::move(iov) - ).handle_exception( - [FNAME](auto e) -> write_ertr::future { - ERROR("dma_write got error {}", - e); - return crimson::ct_error::input_output_error::make(); - } - ).then([bl=std::move(bl)/* hold the buf until the end of io */](size_t written) - -> write_ertr::future<> { - if (written != bl.length()) { - return crimson::ct_error::input_output_error::make(); - } - return write_ertr::now(); + return seastar::do_with( + bl.prepare_iovs(), + std::move(bl), + [&device, device_id, offset, FNAME](auto& iovs, auto& bl) + { + return write_ertr::parallel_for_each( + iovs, + [&device, device_id, offset, FNAME](auto& p) + { + auto off = offset + p.offset; + auto len = p.length; + auto& iov = p.iov; + DEBUG("{} poffset={}~{} dma_write ...", + device_id_printer_t{device_id}, + off, len); + return device.dma_write(off, std::move(iov) + ).handle_exception( + [FNAME, device_id, off, len](auto e) -> write_ertr::future + { + ERROR("{} poffset={}~{} dma_write got error -- {}", + device_id_printer_t{device_id}, off, len, e); + return crimson::ct_error::input_output_error::make(); + }).then([FNAME, device_id, off, len](size_t written) -> write_ertr::future<> { + if (written != len) { + ERROR("{} poffset={}~{} dma_write len={} inconsistent", + device_id_printer_t{device_id}, off, len, written); + return crimson::ct_error::input_output_error::make(); + } + DEBUG("{} poffset={}~{} dma_write done", + device_id_printer_t{device_id}, + off, len); + return write_ertr::now(); + }); + }); }); } -static ZNSSegmentManager::access_ertr::future<> -write_metadata(seastar::file &device, zns_sm_metadata_t sb) +static ZBDSegmentManager::access_ertr::future<> +write_metadata(seastar::file &device, zbd_sm_metadata_t sb) { - assert(ceph::encoded_sizeof_bounded() < + assert(ceph::encoded_sizeof_bounded() < sb.block_size); return seastar::do_with( bufferptr(ceph::buffer::create_page_aligned(sb.block_size)), [=, &device](auto &bp) { - LOG_PREFIX(ZNSSegmentManager::write_metadata); + LOG_PREFIX(ZBDSegmentManager::write_metadata); DEBUG("block_size {}", sb.block_size); bufferlist bl; encode(sb, bl); @@ -288,7 +347,7 @@ static read_ertr::future<> do_read( size_t len, bufferptr &bptr) { - LOG_PREFIX(ZNSSegmentManager::do_read); + LOG_PREFIX(ZBDSegmentManager::do_read); assert(len <= bptr.length()); DEBUG("offset {} len {}", offset, @@ -312,10 +371,10 @@ static read_ertr::future<> do_read( } static -ZNSSegmentManager::access_ertr::future +ZBDSegmentManager::access_ertr::future read_metadata(seastar::file &device, seastar::stat_data sd) { - assert(ceph::encoded_sizeof_bounded() < + assert(ceph::encoded_sizeof_bounded() < sd.block_size); return seastar::do_with( 
bufferptr(ceph::buffer::create_page_aligned(sd.block_size)), @@ -328,29 +387,29 @@ read_metadata(seastar::file &device, seastar::stat_data sd) ).safe_then([=, &bp] { bufferlist bl; bl.push_back(bp); - zns_sm_metadata_t ret; + zbd_sm_metadata_t ret; auto bliter = bl.cbegin(); decode(ret, bliter); ret.validate(); - return ZNSSegmentManager::access_ertr::future( - ZNSSegmentManager::access_ertr::ready_future_marker{}, + return ZBDSegmentManager::access_ertr::future( + ZBDSegmentManager::access_ertr::ready_future_marker{}, ret); }); }); } -ZNSSegmentManager::mount_ret ZNSSegmentManager::mount() +ZBDSegmentManager::mount_ret ZBDSegmentManager::mount() { return shard_devices.invoke_on_all([](auto &local_device) { return local_device.shard_mount( ).handle_error( crimson::ct_error::assert_all{ - "Invalid error in ZNSSegmentManager::mount" + "Invalid error in ZBDSegmentManager::mount" }); }); } -ZNSSegmentManager::mount_ret ZNSSegmentManager::shard_mount() +ZBDSegmentManager::mount_ret ZBDSegmentManager::shard_mount() { return open_device( device_path, seastar::open_flags::rw @@ -365,7 +424,7 @@ ZNSSegmentManager::mount_ret ZNSSegmentManager::shard_mount() }); } -ZNSSegmentManager::mkfs_ret ZNSSegmentManager::mkfs( +ZBDSegmentManager::mkfs_ret ZBDSegmentManager::mkfs( device_config_t config) { return shard_devices.local().primary_mkfs(config @@ -374,29 +433,37 @@ ZNSSegmentManager::mkfs_ret ZNSSegmentManager::mkfs( return local_device.shard_mkfs( ).handle_error( crimson::ct_error::assert_all{ - "Invalid error in ZNSSegmentManager::mkfs" + "Invalid error in ZBDSegmentManager::mkfs" }); }); }); } -ZNSSegmentManager::mkfs_ret ZNSSegmentManager::primary_mkfs( +ZBDSegmentManager::mkfs_ret ZBDSegmentManager::primary_mkfs( device_config_t config) { - LOG_PREFIX(ZNSSegmentManager::primary_mkfs); + LOG_PREFIX(ZBDSegmentManager::primary_mkfs); INFO("starting, device_path {}", device_path); return seastar::do_with( seastar::file{}, seastar::stat_data{}, - zns_sm_metadata_t{}, + zbd_sm_metadata_t{}, + size_t(), size_t(), size_t(), size_t(), - [=, this](auto &device, auto &stat, auto &sb, auto &zone_size_sects, auto &nr_zones, auto &size) { + [=, this] + (auto &device, + auto &stat, + auto &sb, + auto &zone_size_sects, + auto &nr_zones, + auto &size, + auto &nr_cnv_zones) { return open_device( device_path, seastar::open_flags::rw - ).safe_then([=, this, &device, &stat, &sb, &zone_size_sects, &nr_zones, &size](auto p) { + ).safe_then([=, this, &device, &stat, &sb, &zone_size_sects, &nr_zones, &size, &nr_cnv_zones](auto p) { device = p.first; stat = p.second; return device.ioctl( @@ -415,6 +482,10 @@ ZNSSegmentManager::mkfs_ret ZNSSegmentManager::primary_mkfs( return get_blk_dev_size(device); }).then([&](auto devsize) { size = devsize; + return get_nr_cnv_zones(device, nr_zones); + }).then([&](auto cnv_zones) { + DEBUG("Found {} conventional zones", cnv_zones); + nr_cnv_zones = cnv_zones; return get_zone_capacity(device, nr_zones); }).then([&, FNAME, config](auto zone_capacity_sects) { ceph_assert(zone_capacity_sects); @@ -426,10 +497,11 @@ ZNSSegmentManager::mkfs_ret ZNSSegmentManager::primary_mkfs( stat, zone_size_sects, zone_capacity_sects, + nr_cnv_zones, nr_zones); metadata = sb; stats.metadata_write.increment( - ceph::encoded_sizeof_bounded()); + ceph::encoded_sizeof_bounded()); DEBUG("Wrote to stats."); return write_metadata(device, sb); }).finally([&, FNAME] { @@ -443,9 +515,9 @@ ZNSSegmentManager::mkfs_ret ZNSSegmentManager::primary_mkfs( }); } -ZNSSegmentManager::mkfs_ret ZNSSegmentManager::shard_mkfs() 
+ZBDSegmentManager::mkfs_ret ZBDSegmentManager::shard_mkfs() { - LOG_PREFIX(ZNSSegmentManager::shard_mkfs); + LOG_PREFIX(ZBDSegmentManager::shard_mkfs); INFO("starting, device_path {}", device_path); return open_device( device_path, seastar::open_flags::rw @@ -482,7 +554,7 @@ using blk_zone_op_ret = blk_zone_op_ertr::future<>; blk_zone_op_ret blk_zone_op(seastar::file &device, blk_zone_range &range, zone_op op) { - LOG_PREFIX(ZNSSegmentManager::blk_zone_op); + LOG_PREFIX(ZBDSegmentManager::blk_zone_op); unsigned long ioctl_op = 0; switch (op) { @@ -523,10 +595,10 @@ blk_zone_op_ret blk_zone_op(seastar::file &device, }); } -ZNSSegmentManager::open_ertr::future ZNSSegmentManager::open( +ZBDSegmentManager::open_ertr::future ZBDSegmentManager::open( segment_id_t id) { - LOG_PREFIX(ZNSSegmentManager::open); + LOG_PREFIX(ZBDSegmentManager::open); return seastar::do_with( blk_zone_range{}, [=, this](auto &range) { @@ -544,15 +616,15 @@ ZNSSegmentManager::open_ertr::future ZNSSegmentManager::open( DEBUG("segment {}, open successful", id); return open_ertr::future( open_ertr::ready_future_marker{}, - SegmentRef(new ZNSSegment(*this, id)) + SegmentRef(new ZBDSegment(*this, id)) ); }); } -ZNSSegmentManager::release_ertr::future<> ZNSSegmentManager::release( +ZBDSegmentManager::release_ertr::future<> ZBDSegmentManager::release( segment_id_t id) { - LOG_PREFIX(ZNSSegmentManager::release); + LOG_PREFIX(ZBDSegmentManager::release); DEBUG("Resetting zone/segment {}", id); return seastar::do_with( blk_zone_range{}, @@ -573,12 +645,12 @@ ZNSSegmentManager::release_ertr::future<> ZNSSegmentManager::release( }); } -SegmentManager::read_ertr::future<> ZNSSegmentManager::read( +SegmentManager::read_ertr::future<> ZBDSegmentManager::read( paddr_t addr, size_t len, ceph::bufferptr &out) { - LOG_PREFIX(ZNSSegmentManager::read); + LOG_PREFIX(ZBDSegmentManager::read); auto& seg_addr = addr.as_seg_paddr(); if (seg_addr.get_segment_id().device_segment_id() >= get_num_segments()) { ERROR("invalid segment {}", @@ -599,10 +671,10 @@ SegmentManager::read_ertr::future<> ZNSSegmentManager::read( out); } -Segment::close_ertr::future<> ZNSSegmentManager::segment_close( +Segment::close_ertr::future<> ZBDSegmentManager::segment_close( segment_id_t id, segment_off_t write_pointer) { - LOG_PREFIX(ZNSSegmentManager::segment_close); + LOG_PREFIX(ZBDSegmentManager::segment_close); return seastar::do_with( blk_zone_range{}, [=, this](auto &range) { @@ -622,12 +694,12 @@ Segment::close_ertr::future<> ZNSSegmentManager::segment_close( }); } -Segment::write_ertr::future<> ZNSSegmentManager::segment_write( +Segment::write_ertr::future<> ZBDSegmentManager::segment_write( paddr_t addr, ceph::bufferlist bl, bool ignore_check) { - LOG_PREFIX(ZNSSegmentManager::segment_write); + LOG_PREFIX(ZBDSegmentManager::segment_write); assert(addr.get_device_id() == get_device_id()); assert((bl.length() % metadata.block_size) == 0); auto& seg_addr = addr.as_seg_paddr(); @@ -638,33 +710,34 @@ Segment::write_ertr::future<> ZNSSegmentManager::segment_write( bl.length()); stats.data_write.increment(bl.length()); return do_writev( + get_device_id(), device, get_offset(addr), std::move(bl), metadata.block_size); } -device_id_t ZNSSegmentManager::get_device_id() const +device_id_t ZBDSegmentManager::get_device_id() const { return metadata.device_id; }; -secondary_device_set_t& ZNSSegmentManager::get_secondary_devices() +secondary_device_set_t& ZBDSegmentManager::get_secondary_devices() { return metadata.secondary_devices; }; -magic_t 
ZNSSegmentManager::get_magic() const +magic_t ZBDSegmentManager::get_magic() const { return metadata.magic; }; -segment_off_t ZNSSegment::get_write_capacity() const +segment_off_t ZBDSegment::get_write_capacity() const { return manager.get_segment_size(); } -SegmentManager::close_ertr::future<> ZNSSegmentManager::close() +SegmentManager::close_ertr::future<> ZBDSegmentManager::close() { if (device) { return device.close(); @@ -672,15 +745,15 @@ SegmentManager::close_ertr::future<> ZNSSegmentManager::close() return seastar::now(); } -Segment::close_ertr::future<> ZNSSegment::close() +Segment::close_ertr::future<> ZBDSegment::close() { return manager.segment_close(id, write_pointer); } -Segment::write_ertr::future<> ZNSSegment::write( +Segment::write_ertr::future<> ZBDSegment::write( segment_off_t offset, ceph::bufferlist bl) { - LOG_PREFIX(ZNSSegment::write); + LOG_PREFIX(ZBDSegment::write); if (offset != write_pointer || offset % manager.metadata.block_size != 0) { ERROR("Segment offset and zone write pointer mismatch. " "segment {} segment-offset {} write pointer {}", @@ -695,10 +768,10 @@ Segment::write_ertr::future<> ZNSSegment::write( return manager.segment_write(paddr_t::make_seg_paddr(id, offset), bl); } -Segment::write_ertr::future<> ZNSSegment::write_padding_bytes( +Segment::write_ertr::future<> ZBDSegment::write_padding_bytes( size_t padding_bytes) { - LOG_PREFIX(ZNSSegment::write_padding_bytes); + LOG_PREFIX(ZBDSegment::write_padding_bytes); DEBUG("Writing {} padding bytes to segment {} at wp {}", padding_bytes, id, write_pointer); @@ -726,10 +799,10 @@ Segment::write_ertr::future<> ZNSSegment::write_padding_bytes( } // Advance write pointer, to given offset. -Segment::write_ertr::future<> ZNSSegment::advance_wp( +Segment::write_ertr::future<> ZBDSegment::advance_wp( segment_off_t offset) { - LOG_PREFIX(ZNSSegment::advance_wp); + LOG_PREFIX(ZBDSegment::advance_wp); DEBUG("Advancing write pointer from {} to {}", write_pointer, offset); if (offset < write_pointer) { diff --git a/ceph/src/crimson/os/seastore/segment_manager/zns.h b/ceph/src/crimson/os/seastore/segment_manager/zbd.h similarity index 84% rename from ceph/src/crimson/os/seastore/segment_manager/zns.h rename to ceph/src/crimson/os/seastore/segment_manager/zbd.h index b98ff1c89..c18f46336 100644 --- a/ceph/src/crimson/os/seastore/segment_manager/zns.h +++ b/ceph/src/crimson/os/seastore/segment_manager/zbd.h @@ -17,14 +17,14 @@ #include "include/uuid.h" -namespace crimson::os::seastore::segment_manager::zns { +namespace crimson::os::seastore::segment_manager::zbd { - struct zns_shard_info_t { + struct zbd_shard_info_t { size_t size = 0; size_t segments = 0; size_t first_segment_offset = 0; - DENC(zns_shard_info_t, v, p) { + DENC(zbd_shard_info_t, v, p) { DENC_START(1, 1, p); denc(v.size, p); denc(v.segments, p); @@ -33,7 +33,7 @@ namespace crimson::os::seastore::segment_manager::zns { } }; - struct zns_sm_metadata_t { + struct zbd_sm_metadata_t { unsigned int shard_num = 0; size_t segment_size = 0; size_t segment_capacity = 0; @@ -42,7 +42,7 @@ namespace crimson::os::seastore::segment_manager::zns { size_t block_size = 0; size_t zone_size = 0; - std::vector shard_infos; + std::vector shard_infos; seastore_meta_t meta; @@ -52,7 +52,7 @@ namespace crimson::os::seastore::segment_manager::zns { device_id_t device_id = 0; secondary_device_set_t secondary_devices; - DENC(zns_sm_metadata_t, v, p) { + DENC(zbd_sm_metadata_t, v, p) { DENC_START(1, 1, p); denc(v.shard_num, p); denc(v.segment_size, p); @@ -95,11 +95,11 @@ namespace 
crimson::os::seastore::segment_manager::zns { RESET, }; - class ZNSSegmentManager; + class ZBDSegmentManager; - class ZNSSegment final : public Segment { + class ZBDSegment final : public Segment { public: - ZNSSegment(ZNSSegmentManager &man, segment_id_t i) : manager(man), id(i){}; + ZBDSegment(ZBDSegmentManager &man, segment_id_t i) : manager(man), id(i){}; segment_id_t get_segment_id() const final { return id; } segment_off_t get_write_capacity() const final; @@ -108,16 +108,16 @@ namespace crimson::os::seastore::segment_manager::zns { write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final; write_ertr::future<> advance_wp(segment_off_t offset) final; - ~ZNSSegment() {} + ~ZBDSegment() {} private: - friend class ZNSSegmentManager; - ZNSSegmentManager &manager; + friend class ZBDSegmentManager; + ZBDSegmentManager &manager; const segment_id_t id; segment_off_t write_pointer = 0; write_ertr::future<> write_padding_bytes(size_t padding_bytes); }; - class ZNSSegmentManager final : public SegmentManager{ + class ZBDSegmentManager final : public SegmentManager{ // interfaces used by Device public: seastar::future<> start() { @@ -135,9 +135,9 @@ namespace crimson::os::seastore::segment_manager::zns { mount_ret mount() final; mkfs_ret mkfs(device_config_t meta) final; - ZNSSegmentManager(const std::string &path) : device_path(path) {} + ZBDSegmentManager(const std::string &path) : device_path(path) {} - ~ZNSSegmentManager() final = default; + ~ZBDSegmentManager() final = default; //interfaces used by each shard device public: @@ -152,7 +152,7 @@ namespace crimson::os::seastore::segment_manager::zns { ceph::bufferptr &out) final; device_type_t get_device_type() const final { - return device_type_t::ZNS; + return device_type_t::ZBD; } size_t get_available_size() const final { @@ -183,10 +183,10 @@ namespace crimson::os::seastore::segment_manager::zns { bool ignore_check=false); private: - friend class ZNSSegment; + friend class ZBDSegment; std::string device_path; - zns_shard_info_t shard_info; - zns_sm_metadata_t metadata; + zbd_shard_info_t shard_info; + zbd_sm_metadata_t metadata; seastar::file device; uint32_t nr_zones; struct effort_t { @@ -199,7 +199,7 @@ namespace crimson::os::seastore::segment_manager::zns { } }; - struct zns_sm_stats { + struct zbd_sm_stats { effort_t data_read = {}; effort_t data_write = {}; effort_t metadata_write = {}; @@ -209,7 +209,7 @@ namespace crimson::os::seastore::segment_manager::zns { uint64_t released_segments = 0; void reset() { - *this = zns_sm_stats{}; + *this = zbd_sm_stats{}; } } stats; @@ -233,14 +233,14 @@ namespace crimson::os::seastore::segment_manager::zns { mount_ret shard_mount(); - seastar::sharded shard_devices; + seastar::sharded shard_devices; }; } WRITE_CLASS_DENC_BOUNDED( - crimson::os::seastore::segment_manager::zns::zns_shard_info_t + crimson::os::seastore::segment_manager::zbd::zbd_shard_info_t ) WRITE_CLASS_DENC_BOUNDED( - crimson::os::seastore::segment_manager::zns::zns_sm_metadata_t + crimson::os::seastore::segment_manager::zbd::zbd_sm_metadata_t ) diff --git a/ceph/src/crimson/os/seastore/segment_manager_group.cc b/ceph/src/crimson/os/seastore/segment_manager_group.cc index e78e299e7..332b794b7 100644 --- a/ceph/src/crimson/os/seastore/segment_manager_group.cc +++ b/ceph/src/crimson/os/seastore/segment_manager_group.cc @@ -91,14 +91,10 @@ SegmentManagerGroup::read_segment_header(segment_id_t segment) }); } -SegmentManagerGroup::scan_valid_records_ret -SegmentManagerGroup::scan_valid_records( - 
scan_valid_records_cursor &cursor, - segment_nonce_t nonce, - size_t budget, - found_record_handler_t &handler) +void SegmentManagerGroup::initialize_cursor( + scan_valid_records_cursor &cursor) { - LOG_PREFIX(SegmentManagerGroup::scan_valid_records); + LOG_PREFIX(SegmentManagerGroup::initialize_cursor); assert(has_device(cursor.get_segment_id().device_id())); auto& segment_manager = *segment_managers[cursor.get_segment_id().device_id()]; @@ -106,220 +102,24 @@ SegmentManagerGroup::scan_valid_records( INFO("start to scan segment {}", cursor.get_segment_id()); cursor.increment_seq(segment_manager.get_block_size()); } - DEBUG("starting at {}, budget={}", cursor, budget); - auto retref = std::make_unique(0); - auto &budget_used = *retref; - return crimson::repeat( - [=, &cursor, &budget_used, &handler, this]() mutable - -> scan_valid_records_ertr::future { - return [=, &handler, &cursor, &budget_used, this] { - if (!cursor.last_valid_header_found) { - return read_validate_record_metadata(cursor.seq.offset, nonce - ).safe_then([=, &cursor](auto md) { - if (!md) { - cursor.last_valid_header_found = true; - if (cursor.is_complete()) { - INFO("complete at {}, invalid record group metadata", - cursor); - } else { - DEBUG("found invalid record group metadata at {}, " - "processing {} pending record groups", - cursor.seq, - cursor.pending_record_groups.size()); - } - return scan_valid_records_ertr::now(); - } else { - auto& [header, md_bl] = *md; - DEBUG("found valid {} at {}", header, cursor.seq); - cursor.emplace_record_group(header, std::move(md_bl)); - return scan_valid_records_ertr::now(); - } - }).safe_then([=, &cursor, &budget_used, &handler, this] { - DEBUG("processing committed record groups until {}, {} pending", - cursor.last_committed, - cursor.pending_record_groups.size()); - return crimson::repeat( - [=, &budget_used, &cursor, &handler, this] { - if (cursor.pending_record_groups.empty()) { - /* This is only possible if the segment is empty. - * A record's last_commited must be prior to its own - * location since it itself cannot yet have been committed - * at its own time of submission. 
Thus, the most recently - * read record must always fall after cursor.last_committed */ - return scan_valid_records_ertr::make_ready_future< - seastar::stop_iteration>(seastar::stop_iteration::yes); - } - auto &next = cursor.pending_record_groups.front(); - journal_seq_t next_seq = {cursor.seq.segment_seq, next.offset}; - if (cursor.last_committed == JOURNAL_SEQ_NULL || - next_seq > cursor.last_committed) { - return scan_valid_records_ertr::make_ready_future< - seastar::stop_iteration>(seastar::stop_iteration::yes); - } - return consume_next_records(cursor, handler, budget_used - ).safe_then([] { - return scan_valid_records_ertr::make_ready_future< - seastar::stop_iteration>(seastar::stop_iteration::no); - }); - }); - }); - } else { - assert(!cursor.pending_record_groups.empty()); - auto &next = cursor.pending_record_groups.front(); - return read_validate_data(next.offset, next.header - ).safe_then([this, FNAME, &budget_used, &cursor, &handler, &next](auto valid) { - if (!valid) { - INFO("complete at {}, invalid record group data at {}, {}", - cursor, next.offset, next.header); - cursor.pending_record_groups.clear(); - return scan_valid_records_ertr::now(); - } - return consume_next_records(cursor, handler, budget_used); - }); - } - }().safe_then([=, &budget_used, &cursor] { - if (cursor.is_complete() || budget_used >= budget) { - DEBUG("finish at {}, budget_used={}, budget={}", - cursor, budget_used, budget); - return seastar::stop_iteration::yes; - } else { - return seastar::stop_iteration::no; - } - }); - }).safe_then([retref=std::move(retref)]() mutable -> scan_valid_records_ret { - return scan_valid_records_ret( - scan_valid_records_ertr::ready_future_marker{}, - std::move(*retref)); - }); + cursor.block_size = segment_manager.get_block_size(); } -SegmentManagerGroup::read_validate_record_metadata_ret -SegmentManagerGroup::read_validate_record_metadata( - paddr_t start, - segment_nonce_t nonce) +SegmentManagerGroup::read_ret +SegmentManagerGroup::read(paddr_t start, size_t len) { - LOG_PREFIX(SegmentManagerGroup::read_validate_record_metadata); - auto& seg_addr = start.as_seg_paddr(); - assert(has_device(seg_addr.get_segment_id().device_id())); - auto& segment_manager = *segment_managers[seg_addr.get_segment_id().device_id()]; - auto block_size = segment_manager.get_block_size(); - auto segment_size = static_cast(segment_manager.get_segment_size()); - if (seg_addr.get_segment_off() + block_size > segment_size) { - DEBUG("failed -- record group header block {}~4096 > segment_size {}", start, segment_size); - return read_validate_record_metadata_ret( - read_validate_record_metadata_ertr::ready_future_marker{}, - std::nullopt); - } - TRACE("reading record group header block {}~4096", start); - return segment_manager.read(start, block_size - ).safe_then([=, &segment_manager](bufferptr bptr) mutable - -> read_validate_record_metadata_ret { - auto block_size = segment_manager.get_block_size(); - bufferlist bl; - bl.append(bptr); - auto maybe_header = try_decode_records_header(bl, nonce); - if (!maybe_header.has_value()) { - return read_validate_record_metadata_ret( - read_validate_record_metadata_ertr::ready_future_marker{}, - std::nullopt); - } - auto& seg_addr = start.as_seg_paddr(); - auto& header = *maybe_header; - if (header.mdlength < block_size || - header.mdlength % block_size != 0 || - header.dlength % block_size != 0 || - (header.committed_to != JOURNAL_SEQ_NULL && - header.committed_to.offset.as_seg_paddr().get_segment_off() % block_size != 0) || - (seg_addr.get_segment_off() + 
header.mdlength + header.dlength > segment_size)) { - ERROR("failed, invalid record group header {}", start); - return crimson::ct_error::input_output_error::make(); - } - if (header.mdlength == block_size) { - return read_validate_record_metadata_ret( - read_validate_record_metadata_ertr::ready_future_marker{}, - std::make_pair(std::move(header), std::move(bl)) - ); - } - - auto rest_start = paddr_t::make_seg_paddr( - seg_addr.get_segment_id(), - seg_addr.get_segment_off() + block_size - ); - auto rest_len = header.mdlength - block_size; - TRACE("reading record group header rest {}~{}", rest_start, rest_len); - return segment_manager.read(rest_start, rest_len - ).safe_then([header=std::move(header), bl=std::move(bl) - ](auto&& bptail) mutable { - bl.push_back(bptail); - return read_validate_record_metadata_ret( - read_validate_record_metadata_ertr::ready_future_marker{}, - std::make_pair(std::move(header), std::move(bl))); - }); - }).safe_then([](auto p) { - if (p && validate_records_metadata(p->second)) { - return read_validate_record_metadata_ret( - read_validate_record_metadata_ertr::ready_future_marker{}, - std::move(*p) - ); - } else { - return read_validate_record_metadata_ret( - read_validate_record_metadata_ertr::ready_future_marker{}, - std::nullopt); - } - }); -} - -SegmentManagerGroup::read_validate_data_ret -SegmentManagerGroup::read_validate_data( - paddr_t record_base, - const record_group_header_t &header) -{ - LOG_PREFIX(SegmentManagerGroup::read_validate_data); - assert(has_device(record_base.get_device_id())); - auto& segment_manager = *segment_managers[record_base.get_device_id()]; - auto data_addr = record_base.add_offset(header.mdlength); - TRACE("reading record group data blocks {}~{}", data_addr, header.dlength); + LOG_PREFIX(SegmentManagerGroup::read); + assert(has_device(start.get_device_id())); + auto& segment_manager = *segment_managers[start.get_device_id()]; + TRACE("reading data {}~{}", start, len); return segment_manager.read( - data_addr, - header.dlength - ).safe_then([=, &header](auto bptr) { - bufferlist bl; - bl.append(bptr); - return validate_records_data(header, bl); - }); -} - -SegmentManagerGroup::consume_record_group_ertr::future<> -SegmentManagerGroup::consume_next_records( - scan_valid_records_cursor& cursor, - found_record_handler_t& handler, - std::size_t& budget_used) -{ - LOG_PREFIX(SegmentManagerGroup::consume_next_records); - auto& next = cursor.pending_record_groups.front(); - auto total_length = next.header.dlength + next.header.mdlength; - budget_used += total_length; - auto locator = record_locator_t{ - next.offset.add_offset(next.header.mdlength), - write_result_t{ - journal_seq_t{ - cursor.seq.segment_seq, - next.offset - }, - total_length - } - }; - DEBUG("processing {} at {}, budget_used={}", - next.header, locator, budget_used); - return handler( - locator, - next.header, - next.mdbuffer - ).safe_then([FNAME, &cursor] { - cursor.pop_record_group(); - if (cursor.is_complete()) { - INFO("complete at {}, no more record group", cursor); - } + start, + len + ).safe_then([](auto bptr) { + return read_ret( + read_ertr::ready_future_marker{}, + std::move(bptr) + ); }); } diff --git a/ceph/src/crimson/os/seastore/segment_manager_group.h b/ceph/src/crimson/os/seastore/segment_manager_group.h index bd5af9601..f193b5eed 100644 --- a/ceph/src/crimson/os/seastore/segment_manager_group.h +++ b/ceph/src/crimson/os/seastore/segment_manager_group.h @@ -8,10 +8,11 @@ #include "crimson/common/errorator.h" #include 
"crimson/os/seastore/seastore_types.h" #include "crimson/os/seastore/segment_manager.h" +#include "crimson/os/seastore/record_scanner.h" namespace crimson::os::seastore { -class SegmentManagerGroup { +class SegmentManagerGroup : public RecordScanner { public: SegmentManagerGroup() { segment_managers.resize(DEVICE_ID_MAX, nullptr); @@ -96,24 +97,6 @@ public: segment_tail_t>; read_segment_tail_ret read_segment_tail(segment_id_t segment); - using read_ertr = SegmentManager::read_ertr; - using scan_valid_records_ertr = read_ertr; - using scan_valid_records_ret = scan_valid_records_ertr::future< - size_t>; - using found_record_handler_t = std::function< - scan_valid_records_ertr::future<>( - record_locator_t record_locator, - // callee may assume header and bl will remain valid until - // returned future resolves - const record_group_header_t &header, - const bufferlist &mdbuf)>; - scan_valid_records_ret scan_valid_records( - scan_valid_records_cursor &cursor, ///< [in, out] cursor, updated during call - segment_nonce_t nonce, ///< [in] nonce for segment - size_t budget, ///< [in] max budget to use - found_record_handler_t &handler ///< [in] handler for records - ); ///< @return used budget - /* * read journal segment headers */ @@ -143,30 +126,20 @@ private: return device_ids.count(id) >= 1; } - /// read record metadata for record starting at start - using read_validate_record_metadata_ertr = read_ertr; - using read_validate_record_metadata_ret = - read_validate_record_metadata_ertr::future< - std::optional> - >; - read_validate_record_metadata_ret read_validate_record_metadata( - paddr_t start, - segment_nonce_t nonce); - - /// read and validate data - using read_validate_data_ertr = read_ertr; - using read_validate_data_ret = read_validate_data_ertr::future; - read_validate_data_ret read_validate_data( - paddr_t record_base, - const record_group_header_t &header ///< caller must ensure lifetime through - /// future resolution - ); - - using consume_record_group_ertr = scan_valid_records_ertr; - consume_record_group_ertr::future<> consume_next_records( - scan_valid_records_cursor& cursor, - found_record_handler_t& handler, - std::size_t& budget_used); + void initialize_cursor(scan_valid_records_cursor &cursor) final; + + read_ret read(paddr_t start, size_t len) final; + + bool is_record_segment_seq_invalid(scan_valid_records_cursor &cursor, + record_group_header_t &header) final { + return false; + } + + int64_t get_segment_end_offset(paddr_t addr) final { + auto& seg_addr = addr.as_seg_paddr(); + auto& segment_manager = *segment_managers[seg_addr.get_segment_id().device_id()]; + return static_cast(segment_manager.get_segment_size()); + } std::vector segment_managers; std::set device_ids; diff --git a/ceph/src/crimson/os/seastore/transaction_manager.cc b/ceph/src/crimson/os/seastore/transaction_manager.cc index eda9ca1c5..ad8e5f1a6 100644 --- a/ceph/src/crimson/os/seastore/transaction_manager.cc +++ b/ceph/src/crimson/os/seastore/transaction_manager.cc @@ -216,7 +216,7 @@ TransactionManager::ref_ret TransactionManager::dec_ref( { LOG_PREFIX(TransactionManager::dec_ref); TRACET("{}", t, *ref); - return lba_manager->decref_extent(t, ref->get_laddr() + return lba_manager->decref_extent(t, ref->get_laddr(), true ).si_then([this, FNAME, &t, ref](auto result) { DEBUGT("extent refcount is decremented to {} -- {}", t, result.refcount, *ref); @@ -227,29 +227,29 @@ TransactionManager::ref_ret TransactionManager::dec_ref( }); } -TransactionManager::ref_ret TransactionManager::dec_ref( 
+TransactionManager::ref_ret TransactionManager::_dec_ref( Transaction &t, - laddr_t offset) + laddr_t offset, + bool cascade_remove) { - LOG_PREFIX(TransactionManager::dec_ref); + LOG_PREFIX(TransactionManager::_dec_ref); TRACET("{}", t, offset); - return lba_manager->decref_extent(t, offset + return lba_manager->decref_extent(t, offset, cascade_remove ).si_then([this, FNAME, offset, &t](auto result) -> ref_ret { DEBUGT("extent refcount is decremented to {} -- {}~{}, {}", t, result.refcount, offset, result.length, result.addr); - if (result.refcount == 0 && !result.addr.is_zero()) { - return cache->retire_extent_addr( - t, result.addr, result.length - ).si_then([] { - return ref_ret( - interruptible::ready_future_marker{}, - 0); - }); - } else { - return ref_ret( - interruptible::ready_future_marker{}, - result.refcount); + auto fut = ref_iertr::now(); + if (result.refcount == 0) { + if (result.addr.is_paddr() && + !result.addr.get_paddr().is_zero()) { + fut = cache->retire_extent_addr( + t, result.addr.get_paddr(), result.length); + } } + + return fut.si_then([result=std::move(result)] { + return result.refcount; + }); }); } @@ -666,7 +666,7 @@ TransactionManagerRef make_transaction_manager( ->get_journal_size() - primary_device->get_block_size(); // see CircularBoundedJournal::get_records_start() roll_start = static_cast(primary_device) - ->get_journal_start() + primary_device->get_block_size(); + ->get_shard_journal_start() + primary_device->get_block_size(); ceph_assert_always(roll_size <= DEVICE_OFF_MAX); ceph_assert_always((std::size_t)roll_size + roll_start <= primary_device->get_available_size()); diff --git a/ceph/src/crimson/os/seastore/transaction_manager.h b/ceph/src/crimson/os/seastore/transaction_manager.h index 7a67d4efe..dd1898ba7 100644 --- a/ceph/src/crimson/os/seastore/transaction_manager.h +++ b/ceph/src/crimson/os/seastore/transaction_manager.h @@ -178,7 +178,15 @@ public: { auto v = pin->get_logical_extent(t); if (v.has_child()) { - return v.get_child_fut().then([](auto extent) { + return v.get_child_fut().safe_then([pin=std::move(pin)](auto extent) { +#ifndef NDEBUG + auto lextent = extent->template cast(); + auto pin_laddr = pin->get_key(); + if (pin->is_indirect()) { + pin_laddr = pin->get_intermediate_base(); + } + assert(lextent->get_laddr() == pin_laddr); +#endif return extent->template cast(); }); } else { @@ -245,7 +253,9 @@ public: /// Remove refcount for offset ref_ret dec_ref( Transaction &t, - laddr_t offset); + laddr_t offset) { + return _dec_ref(t, offset, true); + } /// remove refcount for list of offset using refs_ret = ref_iertr::future>; @@ -282,7 +292,7 @@ public: laddr_hint, len, ext->get_paddr(), - ext.get() + *ext ).si_then([ext=std::move(ext), laddr_hint, &t](auto &&) mutable { LOG_PREFIX(TransactionManager::alloc_extent); SUBDEBUGT(seastore_tm, "new extent: {}, laddr_hint: {}", t, *ext, laddr_hint); @@ -292,67 +302,155 @@ public: } /** - * map_existing_extent + * remap_pin * - * Allocates a new extent at given existing_paddr that must be absolute and - * reads disk to fill the extent. - * The common usage is that remove the LogicalCachedExtent (laddr~length at paddr) - * and map extent to multiple new extents. - * placement_hint and generation should follow the original extent. + * Remap original extent to new extents. + * Return the pins of new extent. 
*/ - using map_existing_extent_iertr = - alloc_extent_iertr::extend_ertr; - template - using map_existing_extent_ret = - map_existing_extent_iertr::future>; - template - map_existing_extent_ret map_existing_extent( + struct remap_entry { + extent_len_t offset; + extent_len_t len; + remap_entry(extent_len_t _offset, extent_len_t _len) { + offset = _offset; + len = _len; + } + }; + using remap_pin_iertr = base_iertr; + template + using remap_pin_ret = remap_pin_iertr::future>; + template + remap_pin_ret remap_pin( Transaction &t, - laddr_t laddr_hint, - paddr_t existing_paddr, - extent_len_t length) { - LOG_PREFIX(TransactionManager::map_existing_extent); - // FIXME: existing_paddr can be absolute and pending - ceph_assert(existing_paddr.is_absolute()); - assert(t.is_retired(existing_paddr, length)); - - SUBDEBUGT(seastore_tm, " laddr_hint: {} existing_paddr: {} length: {}", - t, laddr_hint, existing_paddr, length); - auto bp = ceph::bufferptr(buffer::create_page_aligned(length)); - bp.zero(); - - // ExtentPlacementManager::alloc_new_extent will make a new - // (relative/temp) paddr, so make extent directly - auto ext = CachedExtent::make_cached_extent_ref(std::move(bp)); + LBAMappingRef &&pin, + std::array remaps) { - ext->init(CachedExtent::extent_state_t::EXIST_CLEAN, - existing_paddr, - PLACEMENT_HINT_NULL, - NULL_GENERATION, - t.get_trans_id()); - - t.add_fresh_extent(ext); - - return lba_manager->alloc_extent( - t, - laddr_hint, - length, - existing_paddr, - ext.get() - ).si_then([ext=std::move(ext), laddr_hint, this](auto &&ref) { - ceph_assert(laddr_hint == ref->get_key()); - return epm->read( - ext->get_paddr(), - ext->get_length(), - ext->get_bptr() - ).safe_then([ext=std::move(ext)] { - return map_existing_extent_iertr::make_ready_future> - (std::move(ext)); +#ifndef NDEBUG + std::sort(remaps.begin(), remaps.end(), + [](remap_entry x, remap_entry y) { + return x.offset < y.offset; + }); + auto original_len = pin->get_length(); + extent_len_t total_remap_len = 0; + extent_len_t last_offset = 0; + extent_len_t last_len = 0; + + for (auto &remap : remaps) { + auto remap_offset = remap.offset; + auto remap_len = remap.len; + total_remap_len += remap.len; + ceph_assert(remap_offset >= (last_offset + last_len)); + last_offset = remap_offset; + last_len = remap_len; + } + ceph_assert(total_remap_len < original_len); +#endif + + // FIXME: paddr can be absolute and pending + ceph_assert(pin->get_val().is_absolute()); + return cache->get_extent_if_cached( + t, pin->get_val(), T::TYPE + ).si_then([this, &t, remaps, + original_laddr = pin->get_key(), + intermediate_base = pin->is_indirect() + ? pin->get_intermediate_base() + : L_ADDR_NULL, + intermediate_key = pin->is_indirect() + ? pin->get_intermediate_key() + : L_ADDR_NULL, + original_paddr = pin->get_val(), + original_len = pin->get_length()](auto ext) mutable { + std::optional original_bptr; + LOG_PREFIX(TransactionManager::remap_pin); + SUBDEBUGT(seastore_tm, + "original laddr: {}, original paddr: {}, original length: {}," + " intermediate_base: {}, intermediate_key: {}," + " remap to {} extents", + t, original_laddr, original_paddr, original_len, + intermediate_base, intermediate_key, remaps.size()); + ceph_assert( + (intermediate_base == L_ADDR_NULL) + == (intermediate_key == L_ADDR_NULL)); + if (ext) { + // FIXME: cannot and will not remap a dirty extent for now. 
+ ceph_assert(!ext->is_dirty()); + ceph_assert(!ext->is_mutable()); + ceph_assert(ext->get_length() >= original_len); + ceph_assert(ext->get_paddr() == original_paddr); + original_bptr = ext->get_bptr(); + } + return seastar::do_with( + std::array(), + 0, + std::move(original_bptr), + std::vector(remaps.begin(), remaps.end()), + [this, &t, original_laddr, original_paddr, + original_len, intermediate_base, intermediate_key] + (auto &ret, auto &count, auto &original_bptr, auto &remaps) { + return _dec_ref(t, original_laddr, false + ).si_then([this, &t, &original_bptr, &ret, &count, + &remaps, intermediate_base, intermediate_key, + original_laddr, original_paddr, original_len](auto) { + return trans_intr::do_for_each( + remaps.begin(), + remaps.end(), + [this, &t, &original_bptr, &ret, + &count, intermediate_base, intermediate_key, + original_laddr, original_paddr, original_len](auto &remap) { + LOG_PREFIX(TransactionManager::remap_pin); + auto remap_offset = remap.offset; + auto remap_len = remap.len; + auto remap_laddr = original_laddr + remap_offset; + auto remap_paddr = original_paddr.add_offset(remap_offset); + ceph_assert(remap_len < original_len); + ceph_assert(remap_offset + remap_len <= original_len); + ceph_assert(remap_len != 0); + ceph_assert(remap_offset % cache->get_block_size() == 0); + ceph_assert(remap_len % cache->get_block_size() == 0); + SUBDEBUGT(seastore_tm, + "remap laddr: {}, remap paddr: {}, remap length: {}", t, + remap_laddr, remap_paddr, remap_len); + auto remapped_intermediate_key = intermediate_key; + if (remapped_intermediate_key != L_ADDR_NULL) { + assert(intermediate_base != L_ADDR_NULL); + remapped_intermediate_key += remap_offset; + } + return alloc_remapped_extent( + t, + remap_laddr, + remap_paddr, + remap_len, + original_laddr, + intermediate_base, + remapped_intermediate_key, + std::move(original_bptr) + ).si_then([&ret, &count, remap_laddr](auto &&npin) { + ceph_assert(npin->get_key() == remap_laddr); + ret[count++] = std::move(npin); + }); + }); + }).si_then([this, &t, intermediate_base, intermediate_key] { + if (N > 1 && intermediate_key != L_ADDR_NULL) { + return lba_manager->incref_extent( + t, intermediate_base, N - 1 + ).si_then([](auto) { + return seastar::now(); + }); + } + return LBAManager::ref_iertr::now(); + }).handle_error_interruptible( + remap_pin_iertr::pass_further{}, + crimson::ct_error::assert_all{ + "TransactionManager::remap_pin hit invalid error" + } + ).si_then([&ret, &count] { + ceph_assert(count == N); + return remap_pin_iertr::make_ready_future< + std::array>(std::move(ret)); + }); }); }); } - using reserve_extent_iertr = alloc_extent_iertr; using reserve_extent_ret = reserve_extent_iertr::future; reserve_extent_ret reserve_region( @@ -362,12 +460,55 @@ public: LOG_PREFIX(TransactionManager::reserve_region); SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}", t, len, hint); ceph_assert(is_aligned(hint, epm->get_block_size())); - return lba_manager->alloc_extent( + return lba_manager->reserve_region( t, hint, - len, - P_ADDR_ZERO, - nullptr); + len); + } + + /* + * clone_pin + * + * create an indirect lba mapping pointing to the physical + * lba mapping whose key is intermediate_key. 
Resort to btree_lba_manager.h + * for the definition of "indirect lba mapping" and "physical lba mapping" + * + */ + using clone_extent_iertr = alloc_extent_iertr; + using clone_extent_ret = clone_extent_iertr::future; + clone_extent_ret clone_pin( + Transaction &t, + laddr_t hint, + const LBAMapping &mapping) { + auto intermediate_key = + mapping.is_indirect() + ? mapping.get_intermediate_key() + : mapping.get_key(); + auto intermediate_base = + mapping.is_indirect() + ? mapping.get_intermediate_base() + : mapping.get_key(); + + LOG_PREFIX(TransactionManager::clone_pin); + SUBDEBUGT(seastore_tm, "len={}, laddr_hint={}, clone_offset {}", + t, mapping.get_length(), hint, intermediate_key); + ceph_assert(is_aligned(hint, epm->get_block_size())); + return lba_manager->clone_extent( + t, + hint, + mapping.get_length(), + intermediate_key, + mapping.get_val(), + intermediate_key + ).si_then([this, &t, intermediate_base](auto pin) { + return inc_ref(t, intermediate_base + ).si_then([pin=std::move(pin)](auto) mutable { + return std::move(pin); + }).handle_error_interruptible( + crimson::ct_error::input_output_error::pass_further(), + crimson::ct_error::assert_all("not possible") + ); + }); } /* alloc_extents @@ -602,6 +743,12 @@ private: ExtentPlacementManager::dispatch_result_t dispatch_result, std::optional seq_to_trim = std::nullopt); + /// Remove refcount for offset + ref_ret _dec_ref( + Transaction &t, + laddr_t offset, + bool cascade_remove); + /** * pin_to_extent * @@ -623,7 +770,9 @@ private: return cache->get_absent_extent( t, pref.get_val(), - pref.get_length(), + pref.is_indirect() ? + pref.get_intermediate_length() : + pref.get_length(), [pin=std::move(pin)] (T &extent) mutable { assert(!extent.has_laddr()); @@ -631,10 +780,11 @@ private: assert(!pin->has_been_invalidated()); assert(pin->get_parent()); pin->link_child(&extent); - extent.set_laddr(pin->get_key()); + extent.maybe_set_intermediate_laddr(*pin); } ).si_then([FNAME, &t](auto ref) mutable -> ret { SUBTRACET(seastore_tm, "got extent -- {}", t, *ref); + assert(ref->is_fully_loaded()); return pin_to_extent_ret( interruptible::ready_future_marker{}, std::move(ref)); @@ -662,7 +812,9 @@ private: type, pref.get_val(), pref.get_key(), - pref.get_length(), + pref.is_indirect() ? + pref.get_intermediate_length() : + pref.get_length(), [pin=std::move(pin)](CachedExtent &extent) mutable { auto &lextent = static_cast(extent); assert(!lextent.has_laddr()); @@ -671,16 +823,80 @@ private: assert(pin->get_parent()); assert(!pin->get_parent()->is_pending()); pin->link_child(&lextent); - lextent.set_laddr(pin->get_key()); + lextent.maybe_set_intermediate_laddr(*pin); } ).si_then([FNAME, &t](auto ref) { SUBTRACET(seastore_tm, "got extent -- {}", t, *ref); + assert(ref->is_fully_loaded()); return pin_to_extent_by_type_ret( interruptible::ready_future_marker{}, std::move(ref->template cast())); }); } + /** + * alloc_remapped_extent + * + * Allocates a new extent at given remap_paddr that must be absolute and + * use the buffer to fill the new extent if buffer exists. Otherwise, will + * not read disk to fill the new extent. + * Returns the new extent. + * + * Should make sure the end laddr of remap extent <= the end laddr of + * original extent when using this method. 
+ */ + using alloc_remapped_extent_iertr = + alloc_extent_iertr::extend_ertr; + using alloc_remapped_extent_ret = + alloc_remapped_extent_iertr::future; + template + alloc_remapped_extent_ret alloc_remapped_extent( + Transaction &t, + laddr_t remap_laddr, + paddr_t remap_paddr, + extent_len_t remap_length, + laddr_t original_laddr, + laddr_t intermediate_base, + laddr_t intermediate_key, + std::optional &&original_bptr) { + LOG_PREFIX(TransactionManager::alloc_remapped_extent); + SUBDEBUG(seastore_tm, "alloc remapped extent: remap_laddr: {}, " + "remap_paddr: {}, remap_length: {}, has data in cache: {} ", + remap_laddr, remap_paddr, remap_length, + original_bptr.has_value() ? "true":"false"); + TCachedExtentRef ext; + auto fut = LBAManager::alloc_extent_iertr::make_ready_future< + LBAMappingRef>(); + assert((intermediate_key == L_ADDR_NULL) + == (intermediate_base == L_ADDR_NULL)); + if (intermediate_key == L_ADDR_NULL) { + // remapping direct mapping + ext = cache->alloc_remapped_extent( + t, + remap_laddr, + remap_paddr, + remap_length, + original_laddr, + std::move(original_bptr)); + fut = lba_manager->alloc_extent( + t, remap_laddr, remap_length, remap_paddr, *ext); + } else { + fut = lba_manager->clone_extent( + t, + remap_laddr, + remap_length, + intermediate_key, + remap_paddr, + intermediate_base); + } + return fut.si_then([remap_laddr, remap_length, remap_paddr](auto &&ref) { + assert(ref->get_key() == remap_laddr); + assert(ref->get_val() == remap_paddr); + assert(ref->get_length() == remap_length); + return alloc_remapped_extent_iertr::make_ready_future + (std::move(ref)); + }); + } public: // Testing interfaces diff --git a/ceph/src/crimson/osd/CMakeLists.txt b/ceph/src/crimson/osd/CMakeLists.txt index 4b24b5a65..f521e0244 100644 --- a/ceph/src/crimson/osd/CMakeLists.txt +++ b/ceph/src/crimson/osd/CMakeLists.txt @@ -2,6 +2,7 @@ add_executable(crimson-osd backfill_state.cc ec_backend.cc heartbeat.cc + lsan_suppressions.cc main.cc main_config_bootstrap_helpers.cc osd.cc diff --git a/ceph/src/crimson/osd/heartbeat.cc b/ceph/src/crimson/osd/heartbeat.cc index a728c327f..266e56533 100644 --- a/ceph/src/crimson/osd/heartbeat.cc +++ b/ceph/src/crimson/osd/heartbeat.cc @@ -236,8 +236,11 @@ void Heartbeat::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replac } } -void Heartbeat::ms_handle_connect(crimson::net::ConnectionRef conn) +void Heartbeat::ms_handle_connect( + crimson::net::ConnectionRef conn, + seastar::shard_id prv_shard) { + ceph_assert_always(seastar::this_shard_id() == prv_shard); auto peer = conn->get_peer_id(); if (conn->get_peer_type() != entity_name_t::TYPE_OSD || peer == entity_name_t::NEW) { @@ -249,8 +252,12 @@ void Heartbeat::ms_handle_connect(crimson::net::ConnectionRef conn) } } -void Heartbeat::ms_handle_accept(crimson::net::ConnectionRef conn) +void Heartbeat::ms_handle_accept( + crimson::net::ConnectionRef conn, + seastar::shard_id prv_shard, + bool is_replace) { + ceph_assert_always(seastar::this_shard_id() == prv_shard); auto peer = conn->get_peer_id(); if (conn->get_peer_type() != entity_name_t::TYPE_OSD || peer == entity_name_t::NEW) { @@ -258,7 +265,7 @@ void Heartbeat::ms_handle_accept(crimson::net::ConnectionRef conn) } if (auto found = peers.find(peer); found != peers.end()) { - found->second.handle_accept(conn); + found->second.handle_accept(conn, is_replace); } } @@ -303,40 +310,38 @@ seastar::future<> Heartbeat::maybe_share_osdmap( Ref m) { const osd_id_t from = m->get_source().num(); - const epoch_t osdmap_epoch = 
service.get_map()->get_epoch(); - const epoch_t peer_epoch = m->map_epoch; + const epoch_t current_osdmap_epoch = service.get_map()->get_epoch(); auto found = peers.find(from); if (found == peers.end()) { return seastar::now(); } auto& peer = found->second; - if (peer_epoch > peer.get_last_epoch_sent()) { - logger().debug("{} updating session's last epoch sent " - "from {} to peer's (id: {}) map epoch of {}", - __func__, peer.get_last_epoch_sent(), - from, peer_epoch); - peer.set_last_epoch_sent(peer_epoch); + if (m->map_epoch > peer.get_projected_epoch()) { + logger().debug("{} updating peer {} session's projected_epoch" + "from {} to ping map epoch of {}", + __func__, from, peer.get_projected_epoch(), + m->map_epoch); + peer.set_projected_epoch(m->map_epoch); } - if (osdmap_epoch <= peer.get_last_epoch_sent()) { - logger().info("{} latest epoch sent {} is already later " - "than osdmap epoch of {}", - __func__ , peer.get_last_epoch_sent(), - osdmap_epoch); + if (current_osdmap_epoch <= peer.get_projected_epoch()) { + logger().debug("{} peer {} projected_epoch {} is already later " + "than our osdmap epoch of {}", + __func__ , from, peer.get_projected_epoch(), + current_osdmap_epoch); return seastar::now(); } - logger().info("{} peer id: {} epoch is {} while osdmap is {}", - __func__ , from, m->map_epoch, osdmap_epoch); - if (osdmap_epoch > m->map_epoch) { - logger().debug("{} sharing osdmap epoch of {} with peer id {}", - __func__, osdmap_epoch, from); - // Peer's newest map is m->map_epoch. Therfore it misses - // the osdmaps in the range of `m->map_epoch` to `osdmap_epoch`. - return service.send_incremental_map_to_osd(from, m->map_epoch); - } - return seastar::now(); + const epoch_t send_from = peer.get_projected_epoch(); + logger().debug("{} sending peer {} peer maps from projected epoch {} through " + "local osdmap epoch {}", + __func__, + from, + send_from, + current_osdmap_epoch); + peer.set_projected_epoch(current_osdmap_epoch); + return service.send_incremental_map_to_osd(from, send_from); } seastar::future<> Heartbeat::handle_reply(crimson::net::ConnectionRef conn, @@ -429,42 +434,57 @@ bool Heartbeat::Connection::matches(crimson::net::ConnectionRef _conn) const return (conn && conn == _conn); } -void Heartbeat::Connection::accepted(crimson::net::ConnectionRef accepted_conn) +bool Heartbeat::Connection::accepted( + crimson::net::ConnectionRef accepted_conn, + bool is_replace) { - if (!conn) { - if (accepted_conn->get_peer_addr() == listener.get_peer_addr(type)) { - logger().info("Heartbeat::Connection::accepted(): " - "{} racing resolved", *this); - conn = accepted_conn; - set_connected(); + ceph_assert(accepted_conn); + ceph_assert(accepted_conn != conn); + if (accepted_conn->get_peer_addr() != listener.get_peer_addr(type)) { + return false; + } + + if (is_replace) { + logger().info("Heartbeat::Connection::accepted(): " + "{} racing", *this); + racing_detected = true; + } + if (conn) { + // there is no assumption about the ordering of the reset and accept + // events for the 2 racing connections. 
+ if (is_connected) { + logger().warn("Heartbeat::Connection::accepted(): " + "{} is accepted while connected, is_replace={}", + *this, is_replace); + conn->mark_down(); + set_unconnected(); } - } else if (conn == accepted_conn) { - set_connected(); } + conn = accepted_conn; + set_connected(); + return true; } -void Heartbeat::Connection::replaced() +void Heartbeat::Connection::reset(bool is_replace) { - assert(!is_connected); - auto replaced_conn = conn; - // set the racing connection, will be handled by handle_accept() - conn = msgr.connect(replaced_conn->get_peer_addr(), - replaced_conn->get_peer_name()); - racing_detected = true; - logger().warn("Heartbeat::Connection::replaced(): {} racing", *this); - assert(conn != replaced_conn); -} + if (is_replace) { + logger().info("Heartbeat::Connection::reset(): " + "{} racing, waiting for the replacing accept", + *this); + racing_detected = true; + } -void Heartbeat::Connection::reset() -{ - conn = nullptr; if (is_connected) { - is_connected = false; - listener.decrease_connected(); + set_unconnected(); + } else { + conn = nullptr; } - if (!racing_detected || is_winner_side) { + + if (is_replace) { + // waiting for the replacing accept event + } else if (!racing_detected || is_winner_side) { connect(); - } else { + } else { // racing_detected && !is_winner_side logger().info("Heartbeat::Connection::reset(): " "{} racing detected and lose, " "waiting for peer connect me", *this); @@ -506,11 +526,22 @@ void Heartbeat::Connection::retry() void Heartbeat::Connection::set_connected() { + assert(conn); assert(!is_connected); + ceph_assert(conn->is_connected()); is_connected = true; listener.increase_connected(); } +void Heartbeat::Connection::set_unconnected() +{ + assert(conn); + assert(is_connected); + conn = nullptr; + is_connected = false; + listener.decrease_connected(); +} + void Heartbeat::Connection::connect() { assert(!conn); @@ -600,6 +631,64 @@ void Heartbeat::Peer::send_heartbeat( } } +void Heartbeat::Peer::handle_reset( + crimson::net::ConnectionRef conn, bool is_replace) +{ + int cnt = 0; + for_each_conn([&] (auto& _conn) { + if (_conn.matches(conn)) { + ++cnt; + _conn.reset(is_replace); + } + }); + + if (cnt == 0) { + logger().info("Heartbeat::Peer::handle_reset(): {} ignores conn, is_replace={} -- {}", + *this, is_replace, *conn); + } else if (cnt > 1) { + logger().error("Heartbeat::Peer::handle_reset(): {} handles conn {} times -- {}", + *this, cnt, *conn); + } +} + +void Heartbeat::Peer::handle_connect(crimson::net::ConnectionRef conn) +{ + int cnt = 0; + for_each_conn([&] (auto& _conn) { + if (_conn.matches(conn)) { + ++cnt; + _conn.connected(); + } + }); + + if (cnt == 0) { + logger().error("Heartbeat::Peer::handle_connect(): {} ignores conn -- {}", + *this, *conn); + conn->mark_down(); + } else if (cnt > 1) { + logger().error("Heartbeat::Peer::handle_connect(): {} handles conn {} times -- {}", + *this, cnt, *conn); + } +} + +void Heartbeat::Peer::handle_accept(crimson::net::ConnectionRef conn, bool is_replace) +{ + int cnt = 0; + for_each_conn([&] (auto& _conn) { + if (_conn.accepted(conn, is_replace)) { + ++cnt; + } + }); + + if (cnt == 0) { + logger().warn("Heartbeat::Peer::handle_accept(): {} ignores conn -- {}", + *this, *conn); + } else if (cnt > 1) { + logger().error("Heartbeat::Peer::handle_accept(): {} handles conn {} times -- {}", + *this, cnt, *conn); + } +} + seastar::future<> Heartbeat::Peer::handle_reply( crimson::net::ConnectionRef conn, Ref m) { diff --git a/ceph/src/crimson/osd/heartbeat.h 
b/ceph/src/crimson/osd/heartbeat.h index 73fcdf788..f5da45118 100644 --- a/ceph/src/crimson/osd/heartbeat.h +++ b/ceph/src/crimson/osd/heartbeat.h @@ -52,8 +52,8 @@ public: std::optional> ms_dispatch( crimson::net::ConnectionRef conn, MessageRef m) override; void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) override; - void ms_handle_connect(crimson::net::ConnectionRef conn) override; - void ms_handle_accept(crimson::net::ConnectionRef conn) override; + void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id) override; + void ms_handle_accept(crimson::net::ConnectionRef conn, seastar::shard_id, bool is_replace) override; void print(std::ostream&) const; private: @@ -189,9 +189,8 @@ class Heartbeat::Connection { void connected() { set_connected(); } - void accepted(crimson::net::ConnectionRef); - void replaced(); - void reset(); + bool accepted(crimson::net::ConnectionRef, bool is_replace); + void reset(bool is_replace=false); seastar::future<> send(MessageURef msg); void validate(); // retry connection if still pending @@ -199,6 +198,7 @@ class Heartbeat::Connection { private: void set_connected(); + void set_unconnected(); void connect(); const osd_id_t peer; @@ -239,19 +239,15 @@ class Heartbeat::Connection { crimson::net::ConnectionRef conn; bool is_connected = false; - friend std::ostream& operator<<(std::ostream& os, const Connection& c) { - if (c.type == type_t::front) { - return os << "con_front(osd." << c.peer << ")"; - } else { - return os << "con_back(osd." << c.peer << ")"; - } - } + friend std::ostream& operator<<(std::ostream& os, const Connection& c) { + if (c.type == type_t::front) { + return os << "con_front(osd." << c.peer << ")"; + } else { + return os << "con_back(osd." << c.peer << ")"; + } + } }; -#if FMT_VERSION >= 90000 -template <> struct fmt::formatter : fmt::ostream_formatter {}; -#endif - /* * Track the ping history and ping reply (the pong) from the same session, clean up * history once hb_front or hb_back loses connection and restart the session once @@ -277,8 +273,8 @@ class Heartbeat::Session { void set_epoch_added(epoch_t epoch_) { epoch = epoch_; } epoch_t get_epoch_added() const { return epoch; } - void set_last_epoch_sent(epoch_t epoch_) { last_sent_epoch = epoch_; } - epoch_t get_last_epoch_sent() const { return last_sent_epoch; } + void set_projected_epoch(epoch_t epoch_) { projected_epoch = epoch_; } + epoch_t get_projected_epoch() const { return projected_epoch; } bool is_started() const { return connected; } bool pinged() const { @@ -389,8 +385,8 @@ class Heartbeat::Session { clock::time_point last_rx_back; // most recent epoch we wanted this peer epoch_t epoch; // rename me to epoch_added - // last epoch sent - epoch_t last_sent_epoch = 0; + // epoch we expect peer to be at once our sent incrementals are processed + epoch_t projected_epoch = 0; struct reply_t { clock::time_point deadline; @@ -414,8 +410,8 @@ class Heartbeat::Peer final : private Heartbeat::ConnectionListener { void set_epoch_added(epoch_t epoch) { session.set_epoch_added(epoch); } epoch_t get_epoch_added() const { return session.get_epoch_added(); } - void set_last_epoch_sent(epoch_t epoch) { session.set_last_epoch_sent(epoch); } - epoch_t get_last_epoch_sent() const { return session.get_last_epoch_sent(); } + void set_projected_epoch(epoch_t epoch) { session.set_projected_epoch(epoch); } + epoch_t get_projected_epoch() const { return session.get_projected_epoch(); } // if failure, return time_point since last active // else, return 
clock::zero() @@ -425,29 +421,12 @@ class Heartbeat::Peer final : private Heartbeat::ConnectionListener { void send_heartbeat( clock::time_point, ceph::signedspan, std::vector>&); seastar::future<> handle_reply(crimson::net::ConnectionRef, Ref); - void handle_reset(crimson::net::ConnectionRef conn, bool is_replace) { - for_each_conn([&] (auto& _conn) { - if (_conn.matches(conn)) { - if (is_replace) { - _conn.replaced(); - } else { - _conn.reset(); - } - } - }); - } - void handle_connect(crimson::net::ConnectionRef conn) { - for_each_conn([&] (auto& _conn) { - if (_conn.matches(conn)) { - _conn.connected(); - } - }); - } - void handle_accept(crimson::net::ConnectionRef conn) { - for_each_conn([&] (auto& _conn) { - _conn.accepted(conn); - }); - } + + void handle_reset(crimson::net::ConnectionRef conn, bool is_replace); + + void handle_connect(crimson::net::ConnectionRef conn); + + void handle_accept(crimson::net::ConnectionRef conn, bool is_replace); private: entity_addr_t get_peer_addr(type_t type) override; @@ -469,8 +448,14 @@ class Heartbeat::Peer final : private Heartbeat::ConnectionListener { bool pending_send = false; Connection con_front; Connection con_back; + + friend std::ostream& operator<<(std::ostream& os, const Peer& p) { + return os << "peer(osd." << p.peer << ")"; + } }; #if FMT_VERSION >= 90000 template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; #endif diff --git a/ceph/src/crimson/osd/lsan_suppressions.cc b/ceph/src/crimson/osd/lsan_suppressions.cc new file mode 100644 index 000000000..53b7eb630 --- /dev/null +++ b/ceph/src/crimson/osd/lsan_suppressions.cc @@ -0,0 +1,20 @@ +#ifndef _NDEBUG +// The callbacks we define here will be called from the sanitizer runtime, but +// aren't referenced from the Chrome executable. We must ensure that those +// callbacks are not sanitizer-instrumented, and that they aren't stripped by +// the linker. +#define SANITIZER_HOOK_ATTRIBUTE \ + extern "C" \ + __attribute__((no_sanitize("address", "thread", "undefined"))) \ + __attribute__((visibility("default"))) \ + __attribute__((used)) + +static char kLSanDefaultSuppressions[] = + "leak:InitModule\n" + "leak:MallocExtension::Initialize\n" + "leak:MallocExtension::Register\n"; + +SANITIZER_HOOK_ATTRIBUTE const char *__lsan_default_suppressions() { + return kLSanDefaultSuppressions; +} +#endif // ! 
_NDEBUG diff --git a/ceph/src/crimson/osd/main.cc b/ceph/src/crimson/osd/main.cc index 234259f0e..1e817415d 100644 --- a/ceph/src/crimson/osd/main.cc +++ b/ceph/src/crimson/osd/main.cc @@ -192,7 +192,8 @@ int main(int argc, const char* argv[]) make_pair(std::ref(hb_back_msgr), "hb_back"s)}) { msgr = crimson::net::Messenger::create(entity_name_t::OSD(whoami), name, - nonce); + nonce, + true); } auto store = crimson::os::FuturizedStore::create( local_conf().get_val("osd_objectstore"), diff --git a/ceph/src/crimson/osd/main_config_bootstrap_helpers.cc b/ceph/src/crimson/osd/main_config_bootstrap_helpers.cc index 0777822b9..807fd1591 100644 --- a/ceph/src/crimson/osd/main_config_bootstrap_helpers.cc +++ b/ceph/src/crimson/osd/main_config_bootstrap_helpers.cc @@ -55,7 +55,8 @@ seastar::future<> populate_config_from_mon() auto auth_handler = std::make_unique(); auto msgr = crimson::net::Messenger::create(entity_name_t::CLIENT(), "temp_mon_client", - get_nonce()); + get_nonce(), + true); crimson::mon::Client monc{*msgr, *auth_handler}; msgr->set_auth_client(&monc); msgr->start({&monc}).get(); diff --git a/ceph/src/crimson/osd/object_context_loader.cc b/ceph/src/crimson/osd/object_context_loader.cc index 4cdbda787..0a4d74c0d 100644 --- a/ceph/src/crimson/osd/object_context_loader.cc +++ b/ceph/src/crimson/osd/object_context_loader.cc @@ -82,11 +82,11 @@ using crimson::common::local_conf; template ObjectContextLoader::load_obc_iertr::future<> - ObjectContextLoader::with_head_and_clone_obc( + ObjectContextLoader::with_clone_obc_direct( hobject_t oid, with_both_obc_func_t&& func) { - LOG_PREFIX(ObjectContextLoader::with_head_and_clone_obc); + LOG_PREFIX(ObjectContextLoader::with_clone_obc_direct); assert(!oid.is_head()); return with_obc( oid.get_head(), @@ -98,14 +98,13 @@ using crimson::common::local_conf; crimson::ct_error::enoent::make() }; } - auto coid = resolve_oid(head->get_head_ss(), oid); - if (!coid) { - ERRORDPP("clone {} not found", dpp, oid); - return load_obc_iertr::future<>{ - crimson::ct_error::enoent::make() - }; - } - auto [clone, existed] = obc_registry.get_cached_obc(*coid); +#ifndef NDEBUG + auto &ss = head->get_head_ss(); + auto cit = std::find( + std::begin(ss.clones), std::end(ss.clones), oid.snap); + assert(cit != std::end(ss.clones)); +#endif + auto [clone, existed] = obc_registry.get_cached_obc(oid); return clone->template with_lock( [existed=existed, clone=std::move(clone), func=std::move(func), head=std::move(head), this]() @@ -227,7 +226,7 @@ using crimson::common::local_conf; with_obc_func_t&&); template ObjectContextLoader::load_obc_iertr::future<> - ObjectContextLoader::with_head_and_clone_obc( + ObjectContextLoader::with_clone_obc_direct( hobject_t, with_both_obc_func_t&&); } diff --git a/ceph/src/crimson/osd/object_context_loader.h b/ceph/src/crimson/osd/object_context_loader.h index 82e2c1e4d..3ab7f6ad8 100644 --- a/ceph/src/crimson/osd/object_context_loader.h +++ b/ceph/src/crimson/osd/object_context_loader.h @@ -53,7 +53,7 @@ public: // object *and* the matching clone object are being used // in func. 
template - load_obc_iertr::future<> with_head_and_clone_obc( + load_obc_iertr::future<> with_clone_obc_direct( hobject_t oid, with_both_obc_func_t&& func); diff --git a/ceph/src/crimson/osd/osd.cc b/ceph/src/crimson/osd/osd.cc index 735b6d777..cfe4f54ab 100644 --- a/ceph/src/crimson/osd/osd.cc +++ b/ceph/src/crimson/osd/osd.cc @@ -95,6 +95,9 @@ OSD::OSD(int id, uint32_t nonce, monc{new crimson::mon::Client{*public_msgr, *this}}, mgrc{new crimson::mgr::Client{*public_msgr, *this}}, store{store}, + pg_shard_manager{osd_singleton_state, + shard_services, + pg_to_shard_mappings}, // do this in background -- continuation rearms timer when complete tick_timer{[this] { std::ignore = update_heartbeat_peers( @@ -108,6 +111,7 @@ OSD::OSD(int id, uint32_t nonce, log_client(cluster_msgr.get(), LogClient::NO_FLAGS), clog(log_client.create_channel()) { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); for (auto msgr : {std::ref(cluster_msgr), std::ref(public_msgr), std::ref(hb_front_msgr), std::ref(hb_back_msgr)}) { msgr.get()->set_auth_server(monc.get()); @@ -159,6 +163,7 @@ CompatSet get_osd_initial_compat_set() seastar::future<> OSD::open_meta_coll() { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return store.get_sharded_store().open_collection( coll_t::meta() ).then([this](auto ch) { @@ -354,11 +359,27 @@ seastar::future<> OSD::start() logger().info("start"); startup_time = ceph::mono_clock::now(); - + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return store.start().then([this] { - return pg_shard_manager.start( - whoami, *cluster_msgr, - *public_msgr, *monc, *mgrc, store); + return pg_to_shard_mappings.start(0, seastar::smp::count + ).then([this] { + return osd_singleton_state.start_single( + whoami, std::ref(*cluster_msgr), std::ref(*public_msgr), + std::ref(*monc), std::ref(*mgrc)); + }).then([this] { + return osd_states.start(); + }).then([this] { + ceph::mono_time startup_time = ceph::mono_clock::now(); + return shard_services.start( + std::ref(osd_singleton_state), + std::ref(pg_to_shard_mappings), + whoami, + startup_time, + osd_singleton_state.local().perf, + osd_singleton_state.local().recoverystate_perf, + std::ref(store), + std::ref(osd_states)); + }); }).then([this] { heartbeat.reset(new Heartbeat{ whoami, get_shard_services(), @@ -385,11 +406,13 @@ seastar::future<> OSD::start() osdmap = make_local_shared_foreign(OSDMapService::local_cached_map_t(map)); return pg_shard_manager.update_map(std::move(map)); }).then([this] { - pg_shard_manager.got_map(osdmap->get_epoch()); + return shard_services.invoke_on_all([this](auto &local_service) { + local_service.local_state.osdmap_gate.got_map(osdmap->get_epoch()); + }); + }).then([this] { bind_epoch = osdmap->get_epoch(); return pg_shard_manager.load_pgs(store); }).then([this] { - uint64_t osd_required = CEPH_FEATURE_UID | CEPH_FEATURE_PGID64 | @@ -584,9 +607,11 @@ seastar::future<> OSD::_add_me_to_crush() }); } -seastar::future<> OSD::handle_command(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_command( + crimson::net::ConnectionRef conn, + Ref m) { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return asok->handle_command(conn, std::move(m)); } @@ -637,7 +662,8 @@ seastar::future<> OSD::stop() tick_timer.cancel(); // see also OSD::shutdown() return prepare_to_stop().then([this] { - pg_shard_manager.set_stopping(); + return pg_shard_manager.set_stopping(); + }).then([this] { logger().debug("prepared to stop"); public_msgr->stop(); cluster_msgr->stop(); @@ -657,7 +683,13 @@ 
seastar::future<> OSD::stop() }).then([this] { return mgrc->stop(); }).then([this] { - return pg_shard_manager.stop(); + return shard_services.stop(); + }).then([this] { + return osd_states.stop(); + }).then([this] { + return osd_singleton_state.stop(); + }).then([this] { + return pg_to_shard_mappings.stop(); }).then([fut=std::move(gate_close_fut)]() mutable { return std::move(fut); }).then([this] { @@ -697,77 +729,104 @@ std::optional> OSD::ms_dispatch(crimson::net::ConnectionRef conn, MessageRef m) { if (pg_shard_manager.is_stopping()) { - return {}; + return seastar::now(); } - // XXX: we're assuming the `switch` part is executed immediately, and thus - // we won't smash the stack. Taking into account how `seastar::with_gate` - // is currently implemented, this seems to be the case (Summer 2022). - bool dispatched = true; - gate.dispatch_in_background(__func__, *this, [this, conn=std::move(conn), - m=std::move(m), &dispatched] { + auto maybe_ret = do_ms_dispatch(conn, std::move(m)); + if (!maybe_ret.has_value()) { + return std::nullopt; + } + + gate.dispatch_in_background( + __func__, *this, [ret=std::move(maybe_ret.value())]() mutable { + return std::move(ret); + }); + return seastar::now(); +} + +std::optional> +OSD::do_ms_dispatch( + crimson::net::ConnectionRef conn, + MessageRef m) +{ + if (seastar::this_shard_id() != PRIMARY_CORE) { switch (m->get_type()) { case CEPH_MSG_OSD_MAP: - return handle_osd_map(conn, boost::static_pointer_cast(m)); - case CEPH_MSG_OSD_OP: - return handle_osd_op(conn, boost::static_pointer_cast(m)); - case MSG_OSD_PG_CREATE2: - return handle_pg_create( - conn, boost::static_pointer_cast(m)); - return seastar::now(); case MSG_COMMAND: - return handle_command(conn, boost::static_pointer_cast(m)); case MSG_OSD_MARK_ME_DOWN: - return handle_mark_me_down(conn, boost::static_pointer_cast(m)); - case MSG_OSD_PG_PULL: - [[fallthrough]]; - case MSG_OSD_PG_PUSH: - [[fallthrough]]; - case MSG_OSD_PG_PUSH_REPLY: - [[fallthrough]]; - case MSG_OSD_PG_RECOVERY_DELETE: - [[fallthrough]]; - case MSG_OSD_PG_RECOVERY_DELETE_REPLY: - [[fallthrough]]; - case MSG_OSD_PG_SCAN: - [[fallthrough]]; - case MSG_OSD_PG_BACKFILL: - [[fallthrough]]; - case MSG_OSD_PG_BACKFILL_REMOVE: - return handle_recovery_subreq(conn, boost::static_pointer_cast(m)); - case MSG_OSD_PG_LEASE: - [[fallthrough]]; - case MSG_OSD_PG_LEASE_ACK: - [[fallthrough]]; - case MSG_OSD_PG_NOTIFY2: - [[fallthrough]]; - case MSG_OSD_PG_INFO2: - [[fallthrough]]; - case MSG_OSD_PG_QUERY2: - [[fallthrough]]; - case MSG_OSD_BACKFILL_RESERVE: - [[fallthrough]]; - case MSG_OSD_RECOVERY_RESERVE: - [[fallthrough]]; - case MSG_OSD_PG_LOG: - return handle_peering_op(conn, boost::static_pointer_cast(m)); - case MSG_OSD_REPOP: - return handle_rep_op(conn, boost::static_pointer_cast(m)); - case MSG_OSD_REPOPREPLY: - return handle_rep_op_reply(conn, boost::static_pointer_cast(m)); - case MSG_OSD_SCRUB2: - return handle_scrub(conn, boost::static_pointer_cast(m)); - case MSG_OSD_PG_UPDATE_LOG_MISSING: - return handle_update_log_missing(conn, boost::static_pointer_cast< - MOSDPGUpdateLogMissing>(m)); - case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY: - return handle_update_log_missing_reply(conn, boost::static_pointer_cast< - MOSDPGUpdateLogMissingReply>(m)); - default: - dispatched = false; - return seastar::now(); + // FIXME: order is not guaranteed in this path + return conn.get_foreign( + ).then([this, m=std::move(m)](auto f_conn) { + return seastar::smp::submit_to(PRIMARY_CORE, + [f_conn=std::move(f_conn), m=std::move(m), this]() 
mutable { + auto conn = make_local_shared_foreign(std::move(f_conn)); + auto ret = do_ms_dispatch(conn, std::move(m)); + assert(ret.has_value()); + return std::move(ret.value()); + }); + }); } - }); - return (dispatched ? std::make_optional(seastar::now()) : std::nullopt); + } + + switch (m->get_type()) { + case CEPH_MSG_OSD_MAP: + return handle_osd_map(boost::static_pointer_cast(m)); + case CEPH_MSG_OSD_OP: + return handle_osd_op(conn, boost::static_pointer_cast(m)); + case MSG_OSD_PG_CREATE2: + return handle_pg_create( + conn, boost::static_pointer_cast(m)); + return seastar::now(); + case MSG_COMMAND: + return handle_command(conn, boost::static_pointer_cast(m)); + case MSG_OSD_MARK_ME_DOWN: + return handle_mark_me_down(conn, boost::static_pointer_cast(m)); + case MSG_OSD_PG_PULL: + [[fallthrough]]; + case MSG_OSD_PG_PUSH: + [[fallthrough]]; + case MSG_OSD_PG_PUSH_REPLY: + [[fallthrough]]; + case MSG_OSD_PG_RECOVERY_DELETE: + [[fallthrough]]; + case MSG_OSD_PG_RECOVERY_DELETE_REPLY: + [[fallthrough]]; + case MSG_OSD_PG_SCAN: + [[fallthrough]]; + case MSG_OSD_PG_BACKFILL: + [[fallthrough]]; + case MSG_OSD_PG_BACKFILL_REMOVE: + return handle_recovery_subreq(conn, boost::static_pointer_cast(m)); + case MSG_OSD_PG_LEASE: + [[fallthrough]]; + case MSG_OSD_PG_LEASE_ACK: + [[fallthrough]]; + case MSG_OSD_PG_NOTIFY2: + [[fallthrough]]; + case MSG_OSD_PG_INFO2: + [[fallthrough]]; + case MSG_OSD_PG_QUERY2: + [[fallthrough]]; + case MSG_OSD_BACKFILL_RESERVE: + [[fallthrough]]; + case MSG_OSD_RECOVERY_RESERVE: + [[fallthrough]]; + case MSG_OSD_PG_LOG: + return handle_peering_op(conn, boost::static_pointer_cast(m)); + case MSG_OSD_REPOP: + return handle_rep_op(conn, boost::static_pointer_cast(m)); + case MSG_OSD_REPOPREPLY: + return handle_rep_op_reply(conn, boost::static_pointer_cast(m)); + case MSG_OSD_SCRUB2: + return handle_scrub(conn, boost::static_pointer_cast(m)); + case MSG_OSD_PG_UPDATE_LOG_MISSING: + return handle_update_log_missing(conn, boost::static_pointer_cast< + MOSDPGUpdateLogMissing>(m)); + case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY: + return handle_update_log_missing_reply(conn, boost::static_pointer_cast< + MOSDPGUpdateLogMissingReply>(m)); + default: + return std::nullopt; + } } void OSD::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) @@ -841,20 +900,26 @@ uint64_t OSD::send_pg_stats() return osd_stat.seq; } -bool OSD::require_mon_peer(crimson::net::Connection *conn, Ref m) +seastar::future<> OSD::handle_osd_map(Ref m) { - if (!conn->peer_is_mon()) { - logger().info("{} received from non-mon {}, {}", - __func__, - conn->get_peer_addr(), - *m); - return false; - } - return true; + /* Ensure that only one MOSDMap is processed at a time. Allowing concurrent + * processing may eventually be worthwhile, but such an implementation would + * need to ensure (among other things) + * 1. any particular map is only processed once + * 2. PGAdvanceMap operations are processed in order for each PG + * As map handling is not presently a bottleneck, we stick to this + * simpler invariant for now. 
+ * See https://tracker.ceph.com/issues/59165 + */ + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); + return handle_osd_map_lock.lock().then([this, m] { + return _handle_osd_map(m); + }).finally([this] { + return handle_osd_map_lock.unlock(); + }); } -seastar::future<> OSD::handle_osd_map(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::_handle_osd_map(Ref m) { logger().info("handle_osd_map {}", *m); if (m->fsid != superblock.cluster_fsid) { @@ -926,10 +991,12 @@ seastar::future<> OSD::handle_osd_map(crimson::net::ConnectionRef conn, }); } -seastar::future<> OSD::committed_osd_maps(version_t first, - version_t last, - Ref m) +seastar::future<> OSD::committed_osd_maps( + version_t first, + version_t last, + Ref m) { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); logger().info("osd.{}: committed_osd_maps({}, {})", whoami, first, last); // advance through the new maps return seastar::do_for_each(boost::make_counting_iterator(first), @@ -956,6 +1023,7 @@ seastar::future<> OSD::committed_osd_maps(version_t first, } }); }).then([m, this] { + auto fut = seastar::now(); if (osdmap->is_up(whoami)) { const auto up_from = osdmap->get_up_from(whoami); logger().info("osd.{}: map e {} marked me up: up_from {}, bind_epoch {}, state {}", @@ -965,12 +1033,13 @@ seastar::future<> OSD::committed_osd_maps(version_t first, osdmap->get_addrs(whoami) == public_msgr->get_myaddrs() && pg_shard_manager.is_booting()) { logger().info("osd.{}: activating...", whoami); - pg_shard_manager.set_active(); - beacon_timer.arm_periodic( - std::chrono::seconds(local_conf()->osd_beacon_report_interval)); - // timer continuation rearms when complete - tick_timer.arm( - std::chrono::seconds(TICK_INTERVAL)); + fut = pg_shard_manager.set_active().then([this] { + beacon_timer.arm_periodic( + std::chrono::seconds(local_conf()->osd_beacon_report_interval)); + // timer continuation rearms when complete + tick_timer.arm( + std::chrono::seconds(TICK_INTERVAL)); + }); } } else { if (pg_shard_manager.is_prestop()) { @@ -978,9 +1047,13 @@ seastar::future<> OSD::committed_osd_maps(version_t first, return seastar::now(); } } - return check_osdmap_features().then([this] { - // yay! - return pg_shard_manager.broadcast_map_to_pgs(osdmap->get_epoch()); + return fut.then([this] { + return check_osdmap_features().then([this] { + // yay! 
+ logger().info("osd.{}: committed_osd_maps: broadcasting osdmaps up" + " to {} epoch to pgs", whoami, osdmap->get_epoch()); + return pg_shard_manager.broadcast_map_to_pgs(osdmap->get_epoch()); + }); }); }).then([m, this] { if (pg_shard_manager.is_active()) { @@ -1013,20 +1086,22 @@ seastar::future<> OSD::committed_osd_maps(version_t first, }); } -seastar::future<> OSD::handle_osd_op(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_osd_op( + crimson::net::ConnectionRef conn, + Ref m) { - (void) pg_shard_manager.start_pg_operation( + return pg_shard_manager.start_pg_operation( get_shard_services(), conn, - std::move(m)); - return seastar::now(); + std::move(m)).second; } -seastar::future<> OSD::handle_pg_create(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_pg_create( + crimson::net::ConnectionRef conn, + Ref m) { - for (auto& [pgid, when] : m->pgs) { + return seastar::do_for_each(m->pgs, [this, conn, m](auto& pg) { + auto& [pgid, when] = pg; const auto &[created, created_stamp] = when; auto q = m->pg_extra.find(pgid); ceph_assert(q != m->pg_extra.end()); @@ -1043,8 +1118,9 @@ seastar::future<> OSD::handle_pg_create(crimson::net::ConnectionRef conn, "unmatched past_intervals {} (history {})", pgid, m->epoch, pi, history); + return seastar::now(); } else { - std::ignore = pg_shard_manager.start_pg_operation( + return pg_shard_manager.start_pg_operation( conn, pg_shard_t(), pgid, @@ -1052,10 +1128,9 @@ seastar::future<> OSD::handle_pg_create(crimson::net::ConnectionRef conn, m->epoch, NullEvt(), true, - new PGCreateInfo(pgid, m->epoch, history, pi, true)); + new PGCreateInfo(pgid, m->epoch, history, pi, true)).second; } - } - return seastar::now(); + }); } seastar::future<> OSD::handle_update_log_missing( @@ -1063,10 +1138,9 @@ seastar::future<> OSD::handle_update_log_missing( Ref m) { m->decode_payload(); - (void) pg_shard_manager.start_pg_operation( + return pg_shard_manager.start_pg_operation( std::move(conn), - std::move(m)); - return seastar::now(); + std::move(m)).second; } seastar::future<> OSD::handle_update_log_missing_reply( @@ -1074,24 +1148,24 @@ seastar::future<> OSD::handle_update_log_missing_reply( Ref m) { m->decode_payload(); - (void) pg_shard_manager.start_pg_operation( + return pg_shard_manager.start_pg_operation( std::move(conn), - std::move(m)); - return seastar::now(); + std::move(m)).second; } -seastar::future<> OSD::handle_rep_op(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_rep_op( + crimson::net::ConnectionRef conn, + Ref m) { m->finish_decode(); - std::ignore = pg_shard_manager.start_pg_operation( + return pg_shard_manager.start_pg_operation( std::move(conn), - std::move(m)); - return seastar::now(); + std::move(m)).second; } -seastar::future<> OSD::handle_rep_op_reply(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_rep_op_reply( + crimson::net::ConnectionRef conn, + Ref m) { spg_t pgid = m->get_spg(); return pg_shard_manager.with_pg( @@ -1107,8 +1181,9 @@ seastar::future<> OSD::handle_rep_op_reply(crimson::net::ConnectionRef conn, }); } -seastar::future<> OSD::handle_scrub(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_scrub( + crimson::net::ConnectionRef conn, + Ref m) { if (m->fsid != superblock.cluster_fsid) { logger().warn("fsid mismatched"); @@ -1127,21 +1202,23 @@ seastar::future<> OSD::handle_scrub(crimson::net::ConnectionRef conn, }); } -seastar::future<> OSD::handle_mark_me_down(crimson::net::ConnectionRef conn, - Ref m) 
+seastar::future<> OSD::handle_mark_me_down( + crimson::net::ConnectionRef conn, + Ref m) { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); if (pg_shard_manager.is_prestop()) { got_stop_ack(); } return seastar::now(); } -seastar::future<> OSD::handle_recovery_subreq(crimson::net::ConnectionRef conn, - Ref m) +seastar::future<> OSD::handle_recovery_subreq( + crimson::net::ConnectionRef conn, + Ref m) { - std::ignore = pg_shard_manager.start_pg_operation( - conn, std::move(m)); - return seastar::now(); + return pg_shard_manager.start_pg_operation( + conn, std::move(m)).second; } bool OSD::should_restart() const @@ -1234,18 +1311,19 @@ seastar::future<> OSD::handle_peering_op( logger().debug("handle_peering_op on {} from {}", m->get_spg(), from); m->set_features(conn->get_features()); std::unique_ptr evt(m->get_event()); - (void) pg_shard_manager.start_pg_operation( + return pg_shard_manager.start_pg_operation( conn, pg_shard_t{from, m->get_spg().shard}, m->get_spg(), - std::move(*evt)); - return seastar::now(); + std::move(*evt)).second; } seastar::future<> OSD::check_osdmap_features() { - return store.write_meta("require_osd_release", - stringify((int)osdmap->require_osd_release)); + assert(seastar::this_shard_id() == PRIMARY_CORE); + return store.write_meta( + "require_osd_release", + stringify((int)osdmap->require_osd_release)); } seastar::future<> OSD::prepare_to_stop() diff --git a/ceph/src/crimson/osd/osd.h b/ceph/src/crimson/osd/osd.h index b3933e80c..10ff60d47 100644 --- a/ceph/src/crimson/osd/osd.h +++ b/ceph/src/crimson/osd/osd.h @@ -24,6 +24,7 @@ #include "crimson/osd/osdmap_gate.h" #include "crimson/osd/pg_map.h" #include "crimson/osd/osd_operations/peering_event.h" +#include "crimson/osd/state.h" #include "messages/MOSDOp.h" #include "osd/PeeringState.h" @@ -90,6 +91,8 @@ class OSD final : public crimson::net::Dispatcher, ceph::mono_time startup_time; + seastar::shared_mutex handle_osd_map_lock; + OSDSuperblock superblock; // Dispatcher methods @@ -97,6 +100,8 @@ class OSD final : public crimson::net::Dispatcher, void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) final; void ms_handle_remote_reset(crimson::net::ConnectionRef conn) final; + std::optional> do_ms_dispatch(crimson::net::ConnectionRef, MessageRef); + // mgr::WithStats methods // pg statistics including osd ones osd_stat_t osd_stat; @@ -108,6 +113,11 @@ class OSD final : public crimson::net::Dispatcher, void handle_authentication(const EntityName& name, const AuthCapsInfo& caps) final; + seastar::sharded pg_to_shard_mappings; + seastar::sharded osd_singleton_state; + seastar::sharded osd_states; + seastar::sharded shard_services; + crimson::osd::PGShardManager pg_shard_manager; std::unique_ptr heartbeat; @@ -126,6 +136,10 @@ public: crimson::net::MessengerRef hb_back_msgr); ~OSD() final; + auto &get_pg_shard_manager() { + return pg_shard_manager; + } + seastar::future<> open_meta_coll(); static seastar::future open_or_create_meta_coll( crimson::os::FuturizedStore &store @@ -146,6 +160,10 @@ public: /// @return the seq id of the pg stats being sent uint64_t send_pg_stats(); + auto &get_shard_services() { + return shard_services.local(); + } + private: static seastar::future<> _write_superblock( crimson::os::FuturizedStore &store, @@ -161,29 +179,29 @@ private: seastar::future<> osdmap_subscribe(version_t epoch, bool force_request); + seastar::future<> start_asok_admin(); + void write_superblock(ceph::os::Transaction& t); seastar::future<> read_superblock(); - bool 
require_mon_peer(crimson::net::Connection *conn, Ref m); - - seastar::future<> handle_osd_map(crimson::net::ConnectionRef conn, - Ref m); + seastar::future<> handle_osd_map(Ref m); + seastar::future<> _handle_osd_map(Ref m); seastar::future<> handle_pg_create(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_osd_op(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_rep_op(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_rep_op_reply(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_peering_op(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_recovery_subreq(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_scrub(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> handle_mark_me_down(crimson::net::ConnectionRef conn, - Ref m); + Ref m); seastar::future<> committed_osd_maps(version_t first, version_t last, @@ -192,20 +210,12 @@ private: seastar::future<> check_osdmap_features(); seastar::future<> handle_command(crimson::net::ConnectionRef conn, - Ref m); - seastar::future<> start_asok_admin(); + Ref m); seastar::future<> handle_update_log_missing(crimson::net::ConnectionRef conn, Ref m); seastar::future<> handle_update_log_missing_reply( crimson::net::ConnectionRef conn, Ref m); -public: - auto &get_pg_shard_manager() { - return pg_shard_manager; - } - ShardServices &get_shard_services() { - return pg_shard_manager.get_shard_services(); - } private: crimson::common::Gated gate; diff --git a/ceph/src/crimson/osd/osd_meta.cc b/ceph/src/crimson/osd/osd_meta.cc index aa30b8190..e40b2b246 100644 --- a/ceph/src/crimson/osd/osd_meta.cc +++ b/ceph/src/crimson/osd/osd_meta.cc @@ -30,8 +30,8 @@ seastar::future OSDMeta::load_map(epoch_t e) osdmap_oid(e), 0, 0, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED).handle_error( read_errorator::all_same_way([e] { - throw std::runtime_error(fmt::format("read gave enoent on {}", - osdmap_oid(e))); + ceph_abort_msg(fmt::format("{} read gave enoent on {}", + __func__, osdmap_oid(e))); })); } diff --git a/ceph/src/crimson/osd/osd_operations/background_recovery.cc b/ceph/src/crimson/osd/osd_operations/background_recovery.cc index 41ad87a32..953ec9595 100644 --- a/ceph/src/crimson/osd/osd_operations/background_recovery.cc +++ b/ceph/src/crimson/osd/osd_operations/background_recovery.cc @@ -172,7 +172,7 @@ PglogBasedRecovery::do_recovery() }); } -PGPeeringPipeline &BackfillRecovery::bp(PG &pg) +PGPeeringPipeline &BackfillRecovery::peering_pp(PG &pg) { return pg.peering_request_pg_pipeline; } @@ -193,7 +193,7 @@ BackfillRecovery::do_recovery() // with the backfill_pipeline we protect it from a second entry from // the implementation of BackfillListener. // additionally, this stage serves to synchronize with PeeringEvent. 
- bp(*pg).process + peering_pp(*pg).process ).then_interruptible([this] { pg->get_recovery_handler()->dispatch_backfill_event(std::move(evt)); return seastar::make_ready_future(false); diff --git a/ceph/src/crimson/osd/osd_operations/background_recovery.h b/ceph/src/crimson/osd/osd_operations/background_recovery.h index 4a1ea1900..17f2cd57a 100644 --- a/ceph/src/crimson/osd/osd_operations/background_recovery.h +++ b/ceph/src/crimson/osd/osd_operations/background_recovery.h @@ -116,7 +116,7 @@ private: boost::intrusive_ptr evt; PipelineHandle handle; - static PGPeeringPipeline &bp(PG &pg); + static PGPeeringPipeline &peering_pp(PG &pg); interruptible_future do_recovery() override; }; diff --git a/ceph/src/crimson/osd/osd_operations/client_request.cc b/ceph/src/crimson/osd/osd_operations/client_request.cc index 9be2108bc..9374fbde2 100644 --- a/ceph/src/crimson/osd/osd_operations/client_request.cc +++ b/ceph/src/crimson/osd/osd_operations/client_request.cc @@ -11,6 +11,7 @@ #include "crimson/osd/osd_operation_external_tracking.h" #include "crimson/osd/osd_operations/client_request.h" #include "crimson/osd/osd_connection_priv.h" +#include "osd/object_state_fmt.h" namespace { seastar::logger& logger() { @@ -80,7 +81,7 @@ ConnectionPipeline &ClientRequest::get_connection_pipeline() return get_osd_priv(conn.get()).client_request_conn_pipeline; } -ClientRequest::PGPipeline &ClientRequest::pp(PG &pg) +ClientRequest::PGPipeline &ClientRequest::client_pp(PG &pg) { return pg.request_pg_pipeline; } @@ -117,7 +118,7 @@ seastar::future<> ClientRequest::with_pg_int( return interruptor::now(); }); } - return ihref.enter_stage(pp(pg).await_map, *this + return ihref.enter_stage(client_pp(pg).await_map, *this ).then_interruptible([this, this_instance_id, &pg, &ihref] { logger().debug("{}.{}: after await_map stage", *this, this_instance_id); return ihref.enter_blocker( @@ -125,7 +126,7 @@ seastar::future<> ClientRequest::with_pg_int( m->get_min_epoch(), nullptr); }).then_interruptible([this, this_instance_id, &pg, &ihref](auto map) { logger().debug("{}.{}: after wait_for_map", *this, this_instance_id); - return ihref.enter_stage(pp(pg).wait_for_active, *this); + return ihref.enter_stage(client_pp(pg).wait_for_active, *this); }).then_interruptible([this, this_instance_id, &pg, &ihref]() { logger().debug( "{}.{}: after wait_for_active stage", *this, this_instance_id); @@ -196,7 +197,7 @@ ClientRequest::interruptible_future<> ClientRequest::process_op(instance_handle_t &ihref, Ref &pg) { return ihref.enter_stage( - pp(*pg).recover_missing, + client_pp(*pg).recover_missing, *this ).then_interruptible( [this, pg]() mutable { @@ -218,7 +219,7 @@ ClientRequest::process_op(instance_handle_t &ihref, Ref &pg) reply->set_reply_versions(completed->version, completed->user_version); return conn->send(std::move(reply)); } else { - return ihref.enter_stage(pp(*pg).get_obc, *this + return ihref.enter_stage(client_pp(*pg).get_obc, *this ).then_interruptible( [this, pg, &ihref]() mutable -> PG::load_obc_iertr::future<> { logger().debug("{}: in get_obc stage", *this); @@ -226,11 +227,13 @@ ClientRequest::process_op(instance_handle_t &ihref, Ref &pg) return pg->with_locked_obc( m->get_hobj(), op_info, [this, pg, &ihref](auto obc) mutable { - return ihref.enter_stage(pp(*pg).process, *this - ).then_interruptible([this, pg, obc, &ihref]() mutable { - return do_process(ihref, pg, obc); + logger().debug("{}: got obc {}", *this, obc->obs); + return ihref.enter_stage( + client_pp(*pg).process, *this + ).then_interruptible([this, pg, 
obc, &ihref]() mutable { + return do_process(ihref, pg, obc); + }); }); - }); }); } }); @@ -311,12 +314,12 @@ ClientRequest::do_process( return pg->do_osd_ops(m, conn, obc, op_info, snapc).safe_then_unpack_interruptible( [this, pg, &ihref](auto submitted, auto all_completed) mutable { return submitted.then_interruptible([this, pg, &ihref] { - return ihref.enter_stage(pp(*pg).wait_repop, *this); + return ihref.enter_stage(client_pp(*pg).wait_repop, *this); }).then_interruptible( [this, pg, all_completed=std::move(all_completed), &ihref]() mutable { return all_completed.safe_then_interruptible( [this, pg, &ihref](MURef reply) { - return ihref.enter_stage(pp(*pg).send_reply, *this + return ihref.enter_stage(client_pp(*pg).send_reply, *this ).then_interruptible( [this, reply=std::move(reply)]() mutable { logger().debug("{}: sending response", *this); diff --git a/ceph/src/crimson/osd/osd_operations/client_request.h b/ceph/src/crimson/osd/osd_operations/client_request.h index 4338ac416..b2dce1e87 100644 --- a/ceph/src/crimson/osd/osd_operations/client_request.h +++ b/ceph/src/crimson/osd/osd_operations/client_request.h @@ -244,7 +244,7 @@ private: Ref &pg); bool is_pg_op() const; - PGPipeline &pp(PG &pg); + PGPipeline &client_pp(PG &pg); template using interruptible_errorator = diff --git a/ceph/src/crimson/osd/osd_operations/internal_client_request.cc b/ceph/src/crimson/osd/osd_operations/internal_client_request.cc index e71804d88..1e9b842b2 100644 --- a/ceph/src/crimson/osd/osd_operations/internal_client_request.cc +++ b/ceph/src/crimson/osd/osd_operations/internal_client_request.cc @@ -43,7 +43,7 @@ void InternalClientRequest::dump_detail(Formatter *f) const { } -CommonPGPipeline& InternalClientRequest::pp() +CommonPGPipeline& InternalClientRequest::client_pp() { return pg->request_pg_pipeline; } @@ -56,7 +56,7 @@ seastar::future<> InternalClientRequest::start() logger().debug("{}: in repeat", *this); return interruptor::with_interruption([this]() mutable { return enter_stage( - pp().wait_for_active + client_pp().wait_for_active ).then_interruptible([this] { return with_blocking_event([this] (auto&& trigger) { @@ -64,12 +64,12 @@ seastar::future<> InternalClientRequest::start() }); }).then_interruptible([this] { return enter_stage( - pp().recover_missing); + client_pp().recover_missing); }).then_interruptible([this] { return do_recover_missing(pg, get_target_oid()); }).then_interruptible([this] { return enter_stage( - pp().get_obc); + client_pp().get_obc); }).then_interruptible([this] () -> PG::load_obc_iertr::future<> { logger().debug("{}: getting obc lock", *this); return seastar::do_with(create_osd_ops(), @@ -81,7 +81,8 @@ seastar::future<> InternalClientRequest::start() assert(ret == 0); return pg->with_locked_obc(get_target_oid(), op_info, [&osd_ops, this](auto obc) { - return enter_stage(pp().process).then_interruptible( + return enter_stage(client_pp().process + ).then_interruptible( [obc=std::move(obc), &osd_ops, this] { return pg->do_osd_ops( std::move(obc), diff --git a/ceph/src/crimson/osd/osd_operations/internal_client_request.h b/ceph/src/crimson/osd/osd_operations/internal_client_request.h index ca78905ea..8eed12e05 100644 --- a/ceph/src/crimson/osd/osd_operations/internal_client_request.h +++ b/ceph/src/crimson/osd/osd_operations/internal_client_request.h @@ -39,7 +39,7 @@ private: void print(std::ostream &) const final; void dump_detail(Formatter *f) const final; - CommonPGPipeline& pp(); + CommonPGPipeline& client_pp(); seastar::future<> do_process(); diff --git 
a/ceph/src/crimson/osd/osd_operations/logmissing_request.cc b/ceph/src/crimson/osd/osd_operations/logmissing_request.cc index 5dfb290f9..739b46406 100644 --- a/ceph/src/crimson/osd/osd_operations/logmissing_request.cc +++ b/ceph/src/crimson/osd/osd_operations/logmissing_request.cc @@ -49,7 +49,7 @@ ConnectionPipeline &LogMissingRequest::get_connection_pipeline() return get_osd_priv(conn.get()).replicated_request_conn_pipeline; } -ClientRequest::PGPipeline &LogMissingRequest::pp(PG &pg) +ClientRequest::PGPipeline &LogMissingRequest::client_pp(PG &pg) { return pg.request_pg_pipeline; } @@ -61,7 +61,18 @@ seastar::future<> LogMissingRequest::with_pg( IRef ref = this; return interruptor::with_interruption([this, pg] { - return pg->do_update_log_missing(req, conn); + logger().debug("{}: pg present", *this); + return this->template enter_stage(client_pp(*pg).await_map + ).then_interruptible([this, pg] { + return this->template with_blocking_event< + PG_OSDMapGate::OSDMapBlocker::BlockingEvent + >([this, pg](auto &&trigger) { + return pg->osdmap_gate.wait_for_map( + std::move(trigger), req->min_epoch); + }); + }).then_interruptible([this, pg](auto) { + return pg->do_update_log_missing(req, conn); + }); }, [ref](std::exception_ptr) { return seastar::now(); }, pg); } diff --git a/ceph/src/crimson/osd/osd_operations/logmissing_request.h b/ceph/src/crimson/osd/osd_operations/logmissing_request.h index 4ab87996f..71d0816fd 100644 --- a/ceph/src/crimson/osd/osd_operations/logmissing_request.h +++ b/ceph/src/crimson/osd/osd_operations/logmissing_request.h @@ -59,12 +59,14 @@ public: ConnectionPipeline::AwaitActive::BlockingEvent, ConnectionPipeline::AwaitMap::BlockingEvent, ConnectionPipeline::GetPG::BlockingEvent, + ClientRequest::PGPipeline::AwaitMap::BlockingEvent, + PG_OSDMapGate::OSDMapBlocker::BlockingEvent, PGMap::PGCreationBlockingEvent, OSD_OSDMapGate::OSDMapBlocker::BlockingEvent > tracking_events; private: - ClientRequest::PGPipeline &pp(PG &pg); + ClientRequest::PGPipeline &client_pp(PG &pg); crimson::net::ConnectionRef conn; // must be after `conn` to ensure the ConnectionPipeline's is alive diff --git a/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.cc b/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.cc index 95a968c14..b4bf2938e 100644 --- a/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.cc +++ b/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.cc @@ -49,7 +49,7 @@ ConnectionPipeline &LogMissingRequestReply::get_connection_pipeline() return get_osd_priv(conn.get()).replicated_request_conn_pipeline; } -ClientRequest::PGPipeline &LogMissingRequestReply::pp(PG &pg) +ClientRequest::PGPipeline &LogMissingRequestReply::client_pp(PG &pg) { return pg.request_pg_pipeline; } diff --git a/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.h b/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.h index cb39e9f6c..c89131fec 100644 --- a/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.h +++ b/ceph/src/crimson/osd/osd_operations/logmissing_request_reply.h @@ -64,7 +64,7 @@ public: > tracking_events; private: - ClientRequest::PGPipeline &pp(PG &pg); + ClientRequest::PGPipeline &client_pp(PG &pg); crimson::net::ConnectionRef conn; // must be after `conn` to ensure the ConnectionPipeline's is alive diff --git a/ceph/src/crimson/osd/osd_operations/peering_event.cc b/ceph/src/crimson/osd/osd_operations/peering_event.cc index b323b4a81..ea4662bd0 100644 --- a/ceph/src/crimson/osd/osd_operations/peering_event.cc +++ 
b/ceph/src/crimson/osd/osd_operations/peering_event.cc @@ -54,7 +54,7 @@ void PeeringEvent::dump_detail(Formatter *f) const template -PGPeeringPipeline &PeeringEvent::pp(PG &pg) +PGPeeringPipeline &PeeringEvent::peering_pp(PG &pg) { return pg.peering_request_pg_pipeline; } @@ -73,7 +73,7 @@ seastar::future<> PeeringEvent::with_pg( using interruptor = typename T::interruptor; return interruptor::with_interruption([this, pg, &shard_services] { logger().debug("{}: pg present", *this); - return this->template enter_stage(pp(*pg).await_map + return this->template enter_stage(peering_pp(*pg).await_map ).then_interruptible([this, pg] { return this->template with_blocking_event< PG_OSDMapGate::OSDMapBlocker::BlockingEvent @@ -82,7 +82,7 @@ seastar::future<> PeeringEvent::with_pg( std::move(trigger), evt.get_epoch_sent()); }); }).then_interruptible([this, pg](auto) { - return this->template enter_stage(pp(*pg).process); + return this->template enter_stage(peering_pp(*pg).process); }).then_interruptible([this, pg, &shard_services] { return pg->do_peering_event(evt, ctx ).then_interruptible([this, pg, &shard_services] { diff --git a/ceph/src/crimson/osd/osd_operations/peering_event.h b/ceph/src/crimson/osd/osd_operations/peering_event.h index d9c9da58a..e94caead1 100644 --- a/ceph/src/crimson/osd/osd_operations/peering_event.h +++ b/ceph/src/crimson/osd/osd_operations/peering_event.h @@ -51,7 +51,7 @@ public: static constexpr OperationTypeCode type = OperationTypeCode::peering_event; protected: - PGPeeringPipeline &pp(PG &pg); + PGPeeringPipeline &peering_pp(PG &pg); PeeringCtx ctx; pg_shard_t from; diff --git a/ceph/src/crimson/osd/osd_operations/pg_advance_map.cc b/ceph/src/crimson/osd/osd_operations/pg_advance_map.cc index 2514a0e74..3706af810 100644 --- a/ceph/src/crimson/osd/osd_operations/pg_advance_map.cc +++ b/ceph/src/crimson/osd/osd_operations/pg_advance_map.cc @@ -24,7 +24,10 @@ PGAdvanceMap::PGAdvanceMap( ShardServices &shard_services, Ref pg, epoch_t to, PeeringCtx &&rctx, bool do_init) : shard_services(shard_services), pg(pg), to(to), - rctx(std::move(rctx)), do_init(do_init) {} + rctx(std::move(rctx)), do_init(do_init) +{ + logger().debug("{}: created", *this); +} PGAdvanceMap::~PGAdvanceMap() {} @@ -52,6 +55,11 @@ void PGAdvanceMap::dump_detail(Formatter *f) const f->close_section(); } +PGPeeringPipeline &PGAdvanceMap::peering_pp(PG &pg) +{ + return pg.peering_request_pg_pipeline; +} + seastar::future<> PGAdvanceMap::start() { using cached_map_t = OSDMapService::cached_map_t; @@ -60,8 +68,17 @@ seastar::future<> PGAdvanceMap::start() IRef ref = this; return enter_stage<>( - pg->peering_request_pg_pipeline.process + peering_pp(*pg).process ).then([this] { + /* + * PGAdvanceMap is scheduled at pg creation and when + * broadcasting new osdmaps to pgs. We are not able to serialize + * between the two different PGAdvanceMap callers since a new pg + * will get advanced to the latest osdmap at it's creation. + * As a result, we may need to adjust the PGAdvance operation + * 'from' epoch. 
+ * See: https://tracker.ceph.com/issues/61744 + */ from = pg->get_osdmap_epoch(); auto fut = seastar::now(); if (do_init) { @@ -71,10 +88,13 @@ seastar::future<> PGAdvanceMap::start() }); } return fut.then([this] { + ceph_assert(std::cmp_less_equal(*from, to)); return seastar::do_for_each( boost::make_counting_iterator(*from + 1), boost::make_counting_iterator(to + 1), [this](epoch_t next_epoch) { + logger().debug("{}: start: getting map {}", + *this, next_epoch); return shard_services.get_map(next_epoch).then( [this] (cached_map_t&& next_map) { logger().debug("{}: advancing map to {}", diff --git a/ceph/src/crimson/osd/osd_operations/pg_advance_map.h b/ceph/src/crimson/osd/osd_operations/pg_advance_map.h index c7277f790..b712cc12e 100644 --- a/ceph/src/crimson/osd/osd_operations/pg_advance_map.h +++ b/ceph/src/crimson/osd/osd_operations/pg_advance_map.h @@ -49,6 +49,9 @@ public: std::tuple< PGPeeringPipeline::Process::BlockingEvent > tracking_events; + +private: + PGPeeringPipeline &peering_pp(PG &pg); }; } diff --git a/ceph/src/crimson/osd/osd_operations/replicated_request.cc b/ceph/src/crimson/osd/osd_operations/replicated_request.cc index f7d4fa68b..09217575c 100644 --- a/ceph/src/crimson/osd/osd_operations/replicated_request.cc +++ b/ceph/src/crimson/osd/osd_operations/replicated_request.cc @@ -21,7 +21,7 @@ namespace crimson::osd { RepRequest::RepRequest(crimson::net::ConnectionRef&& conn, Ref &&req) : conn{std::move(conn)}, - req{req} + req{std::move(req)} {} void RepRequest::print(std::ostream& os) const @@ -49,7 +49,7 @@ ConnectionPipeline &RepRequest::get_connection_pipeline() return get_osd_priv(conn.get()).replicated_request_conn_pipeline; } -ClientRequest::PGPipeline &RepRequest::pp(PG &pg) +ClientRequest::PGPipeline &RepRequest::client_pp(PG &pg) { return pg.request_pg_pipeline; } @@ -61,7 +61,7 @@ seastar::future<> RepRequest::with_pg( IRef ref = this; return interruptor::with_interruption([this, pg] { logger().debug("{}: pg present", *this); - return this->template enter_stage(pp(*pg).await_map + return this->template enter_stage(client_pp(*pg).await_map ).then_interruptible([this, pg] { return this->template with_blocking_event< PG_OSDMapGate::OSDMapBlocker::BlockingEvent diff --git a/ceph/src/crimson/osd/osd_operations/replicated_request.h b/ceph/src/crimson/osd/osd_operations/replicated_request.h index 78d97ecf4..c742888d9 100644 --- a/ceph/src/crimson/osd/osd_operations/replicated_request.h +++ b/ceph/src/crimson/osd/osd_operations/replicated_request.h @@ -66,7 +66,7 @@ public: > tracking_events; private: - ClientRequest::PGPipeline &pp(PG &pg); + ClientRequest::PGPipeline &client_pp(PG &pg); crimson::net::ConnectionRef conn; PipelineHandle handle; diff --git a/ceph/src/crimson/osd/osd_operations/snaptrim_event.cc b/ceph/src/crimson/osd/osd_operations/snaptrim_event.cc index e63e78481..e4a1b04df 100644 --- a/ceph/src/crimson/osd/osd_operations/snaptrim_event.cc +++ b/ceph/src/crimson/osd/osd_operations/snaptrim_event.cc @@ -30,6 +30,15 @@ namespace crimson { namespace crimson::osd { +PG::interruptible_future<> +PG::SnapTrimMutex::lock(SnapTrimEvent &st_event) noexcept +{ + return st_event.enter_stage(wait_pg + ).then_interruptible([this] { + return mutex.lock(); + }); +} + void SnapTrimEvent::SubOpBlocker::dump_detail(Formatter *f) const { f->open_array_section("dependent_operations"); @@ -83,7 +92,7 @@ SnapTrimEvent::start() }); } -CommonPGPipeline& SnapTrimEvent::pp() +CommonPGPipeline& SnapTrimEvent::client_pp() { return pg->request_pg_pipeline; } @@ -94,7 +103,7 @@ 
SnapTrimEvent::with_pg( { return interruptor::with_interruption([&shard_services, this] { return enter_stage( - pp().wait_for_active + client_pp().wait_for_active ).then_interruptible([this] { return with_blocking_event([this] (auto&& trigger) { @@ -102,16 +111,18 @@ SnapTrimEvent::with_pg( }); }).then_interruptible([this] { return enter_stage( - pp().recover_missing); + client_pp().recover_missing); }).then_interruptible([] { //return do_recover_missing(pg, get_target_oid()); return seastar::now(); }).then_interruptible([this] { return enter_stage( - pp().get_obc); + client_pp().get_obc); + }).then_interruptible([this] { + return pg->snaptrim_mutex.lock(*this); }).then_interruptible([this] { return enter_stage( - pp().process); + client_pp().process); }).then_interruptible([&shard_services, this] { return interruptor::async([this] { std::vector to_trim; @@ -140,27 +151,32 @@ SnapTrimEvent::with_pg( if (to_trim.empty()) { // the legit ENOENT -> done logger().debug("{}: to_trim is empty! Stopping iteration", *this); + pg->snaptrim_mutex.unlock(); return snap_trim_iertr::make_ready_future( seastar::stop_iteration::yes); } - for (const auto& object : to_trim) { - logger().debug("{}: trimming {}", *this, object); - auto [op, fut] = shard_services.start_operation_may_interrupt< - interruptor, SnapTrimObjSubEvent>( - pg, - object, - snapid); - subop_blocker.emplace_back( - op->get_id(), - std::move(fut) - ); - } - return enter_stage( - wait_subop - ).then_interruptible([this] { + return [&shard_services, this](const auto &to_trim) { + for (const auto& object : to_trim) { + logger().debug("{}: trimming {}", *this, object); + auto [op, fut] = shard_services.start_operation_may_interrupt< + interruptor, SnapTrimObjSubEvent>( + pg, + object, + snapid); + subop_blocker.emplace_back( + op->get_id(), + std::move(fut) + ); + } + return interruptor::now(); + }(to_trim).then_interruptible([this] { + return enter_stage(wait_subop); + }).then_interruptible([this] { logger().debug("{}: awaiting completion", *this); return subop_blocker.wait_completion(); - }).safe_then_interruptible([this] { + }).finally([this] { + pg->snaptrim_mutex.unlock(); + }).safe_then_interruptible([this] { if (!needs_pause) { return interruptor::now(); } @@ -191,7 +207,7 @@ SnapTrimEvent::with_pg( } -CommonPGPipeline& SnapTrimObjSubEvent::pp() +CommonPGPipeline& SnapTrimObjSubEvent::client_pp() { return pg->request_pg_pipeline; } @@ -481,7 +497,7 @@ SnapTrimObjSubEvent::with_pg( ShardServices &shard_services, Ref _pg) { return enter_stage( - pp().wait_for_active + client_pp().wait_for_active ).then_interruptible([this] { return with_blocking_event([this] (auto&& trigger) { @@ -489,23 +505,23 @@ SnapTrimObjSubEvent::with_pg( }); }).then_interruptible([this] { return enter_stage( - pp().recover_missing); + client_pp().recover_missing); }).then_interruptible([] { //return do_recover_missing(pg, get_target_oid()); return seastar::now(); }).then_interruptible([this] { return enter_stage( - pp().get_obc); + client_pp().get_obc); }).then_interruptible([this] { logger().debug("{}: getting obc for {}", *this, coid); // end of commonality - // with_head_and_clone_obc lock both clone's and head's obcs - return pg->obc_loader.with_head_and_clone_obc( + // with_clone_obc_direct lock both clone's and head's obcs + return pg->obc_loader.with_clone_obc_direct( coid, [this](auto head_obc, auto clone_obc) { logger().debug("{}: got clone_obc={}", *this, clone_obc->get_oid()); return enter_stage( - pp().process + client_pp().process 
).then_interruptible( [this,clone_obc=std::move(clone_obc), head_obc=std::move(head_obc)]() mutable { logger().debug("{}: processing clone_obc={}", *this, clone_obc->get_oid()); diff --git a/ceph/src/crimson/osd/osd_operations/snaptrim_event.h b/ceph/src/crimson/osd/osd_operations/snaptrim_event.h index f4ae1bf06..a3a970a04 100644 --- a/ceph/src/crimson/osd/osd_operations/snaptrim_event.h +++ b/ceph/src/crimson/osd/osd_operations/snaptrim_event.h @@ -25,7 +25,6 @@ namespace crimson::osd { class OSD; class ShardServices; -class PG; // trim up to `max` objects for snapshot `snapid class SnapTrimEvent final : public PhasedOperationT { @@ -58,7 +57,7 @@ public: ShardServices &shard_services, Ref pg); private: - CommonPGPipeline& pp(); + CommonPGPipeline& client_pp(); // bases on 998cb8c141bb89aafae298a9d5e130fbd78fe5f2 struct SubOpBlocker : crimson::BlockerT { @@ -107,9 +106,12 @@ public: CommonPGPipeline::GetOBC::BlockingEvent, CommonPGPipeline::Process::BlockingEvent, WaitSubop::BlockingEvent, + PG::SnapTrimMutex::WaitPG::BlockingEvent, WaitTrimTimer::BlockingEvent, CompletionEvent > tracking_events; + + friend class PG::SnapTrimMutex; }; // remove single object. a SnapTrimEvent can create multiple subrequests. @@ -141,7 +143,7 @@ public: remove_or_update_iertr::future<> with_pg( ShardServices &shard_services, Ref pg); - CommonPGPipeline& pp(); + CommonPGPipeline& client_pp(); private: object_stat_sum_t delta_stats; diff --git a/ceph/src/crimson/osd/osdmap_gate.cc b/ceph/src/crimson/osd/osdmap_gate.cc index b1fb66924..171ec436d 100644 --- a/ceph/src/crimson/osd/osdmap_gate.cc +++ b/ceph/src/crimson/osd/osdmap_gate.cc @@ -54,6 +54,10 @@ seastar::future OSDMapGate::wait_for_map( template void OSDMapGate::got_map(epoch_t epoch) { + if (epoch == 0) { + return; + } + ceph_assert(epoch > current); current = epoch; auto first = waiting_peering.begin(); auto last = waiting_peering.upper_bound(epoch); diff --git a/ceph/src/crimson/osd/pg.cc b/ceph/src/crimson/osd/pg.cc index 3d5bb20d4..7cf3b158c 100644 --- a/ceph/src/crimson/osd/pg.cc +++ b/ceph/src/crimson/osd/pg.cc @@ -535,7 +535,7 @@ void PG::on_active_advmap(const OSDMapRef &osdmap) } logger().info("{}: {} new removed snaps {}, snap_trimq now{}", *this, __func__, it->second, snap_trimq); - assert(!bad || local_conf().get_val("osd_debug_verify_cached_snaps")); + assert(!bad || !local_conf().get_val("osd_debug_verify_cached_snaps")); } } @@ -1510,6 +1510,7 @@ bool PG::is_degraded_or_backfilling_object(const hobject_t& soid) const { // we are backfilling it if (is_backfill_target(peer) && peering_state.get_peer_info(peer).last_backfill <= soid && + recovery_handler->backfill_state && recovery_handler->backfill_state->get_last_backfill_started() >= soid && recovery_backend->is_recovering(soid)) { return true; diff --git a/ceph/src/crimson/osd/pg.h b/ceph/src/crimson/osd/pg.h index 3a7d21ba9..d96db2e20 100644 --- a/ceph/src/crimson/osd/pg.h +++ b/ceph/src/crimson/osd/pg.h @@ -61,6 +61,7 @@ namespace crimson::os { namespace crimson::osd { class OpsExecuter; class BackfillRecovery; +class SnapTrimEvent; class PG : public boost::intrusive_ref_counter< PG, @@ -552,6 +553,20 @@ public: eversion_t &version); private: + + struct SnapTrimMutex { + struct WaitPG : OrderedConcurrentPhaseT { + static constexpr auto type_name = "SnapTrimEvent::wait_pg"; + } wait_pg; + seastar::shared_mutex mutex; + + interruptible_future<> lock(SnapTrimEvent &st_event) noexcept; + + void unlock() noexcept { + mutex.unlock(); + } + } snaptrim_mutex; + using do_osd_ops_ertr = 
crimson::errorator< crimson::ct_error::eagain>; using do_osd_ops_iertr = diff --git a/ceph/src/crimson/osd/pg_backend.cc b/ceph/src/crimson/osd/pg_backend.cc index d69e5e204..02acb9a55 100644 --- a/ceph/src/crimson/osd/pg_backend.cc +++ b/ceph/src/crimson/osd/pg_backend.cc @@ -1338,13 +1338,19 @@ PGBackend::omap_get_header( OSDOp& osd_op, object_stat_sum_t& delta_stats) const { - return omap_get_header(coll, ghobject_t{os.oi.soid}).safe_then_interruptible( - [&delta_stats, &osd_op] (ceph::bufferlist&& header) { - osd_op.outdata = std::move(header); - delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); - delta_stats.num_rd++; - return seastar::now(); - }); + if (os.oi.is_omap()) { + return omap_get_header(coll, ghobject_t{os.oi.soid}).safe_then_interruptible( + [&delta_stats, &osd_op] (ceph::bufferlist&& header) { + osd_op.outdata = std::move(header); + delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); + delta_stats.num_rd++; + return seastar::now(); + }); + } else { + // no omap? return empty data but not ENOENT. This is imporant for + // the case when the object is being creating due to to may_write(). + return seastar::now(); + } } PGBackend::ll_read_ierrorator::future<> diff --git a/ceph/src/crimson/osd/pg_map.h b/ceph/src/crimson/osd/pg_map.h index f4b38ae45..3269de434 100644 --- a/ceph/src/crimson/osd/pg_map.h +++ b/ceph/src/crimson/osd/pg_map.h @@ -21,9 +21,11 @@ class PG; /** * PGShardMapping * - * Maps pgs to shards. + * Maintains a mapping from spg_t to the core containing that PG. Internally, each + * core has a local copy of the mapping to enable core-local lookups. Updates + * are proxied to core 0, and the back out to all other cores -- see maybe_create_pg. */ -class PGShardMapping { +class PGShardMapping : public seastar::peering_sharded_service { public: /// Returns mapping if present, NULL_CORE otherwise core_id_t get_pg_mapping(spg_t pgid) { @@ -33,44 +35,69 @@ public: } /// Returns mapping for pgid, creates new one if it doesn't already exist - core_id_t maybe_create_pg(spg_t pgid, core_id_t core = NULL_CORE) { - auto [insert_iter, inserted] = pg_to_core.emplace(pgid, core); - if (!inserted) { - ceph_assert_always(insert_iter->second != NULL_CORE); + seastar::future maybe_create_pg( + spg_t pgid, + core_id_t core = NULL_CORE) { + auto find_iter = pg_to_core.find(pgid); + if (find_iter != pg_to_core.end()) { + ceph_assert_always(find_iter->second != NULL_CORE); if (core != NULL_CORE) { - ceph_assert_always(insert_iter->second == core); + ceph_assert_always(find_iter->second == core); } - return insert_iter->second; + return seastar::make_ready_future(find_iter->second); } else { - ceph_assert_always(core_to_num_pgs.size() > 0); - std::map::iterator core_iter; - if (core == NULL_CORE) { - core_iter = std::min_element( - core_to_num_pgs.begin(), - core_to_num_pgs.end(), - [](const auto &left, const auto &right) { - return left.second < right.second; + return container().invoke_on(0,[pgid, core] + (auto &primary_mapping) { + auto [insert_iter, inserted] = primary_mapping.pg_to_core.emplace(pgid, core); + ceph_assert_always(inserted); + ceph_assert_always(primary_mapping.core_to_num_pgs.size() > 0); + std::map::iterator core_iter; + if (core == NULL_CORE) { + core_iter = std::min_element( + primary_mapping.core_to_num_pgs.begin(), + primary_mapping.core_to_num_pgs.end(), + [](const auto &left, const auto &right) { + return left.second < right.second; + }); + } else { + core_iter = primary_mapping.core_to_num_pgs.find(core); + } + 
ceph_assert_always(primary_mapping.core_to_num_pgs.end() != core_iter); + insert_iter->second = core_iter->first; + core_iter->second++; + return primary_mapping.container().invoke_on_others( + [pgid = insert_iter->first, core = insert_iter->second] + (auto &other_mapping) { + ceph_assert_always(core != NULL_CORE); + auto [insert_iter, inserted] = other_mapping.pg_to_core.emplace(pgid, core); + ceph_assert_always(inserted); }); - } else { - core_iter = core_to_num_pgs.find(core); - } - ceph_assert_always(core_to_num_pgs.end() != core_iter); - insert_iter->second = core_iter->first; - core_iter->second++; - return insert_iter->second; + }).then([this, pgid] { + auto find_iter = pg_to_core.find(pgid); + return seastar::make_ready_future(find_iter->second); + }); } } /// Remove pgid - void remove_pg(spg_t pgid) { - auto iter = pg_to_core.find(pgid); - ceph_assert_always(iter != pg_to_core.end()); - ceph_assert_always(iter->second != NULL_CORE); - auto count_iter = core_to_num_pgs.find(iter->second); - ceph_assert_always(count_iter != core_to_num_pgs.end()); - ceph_assert_always(count_iter->second > 0); - --(count_iter->second); - pg_to_core.erase(iter); + seastar::future<> remove_pg(spg_t pgid) { + return container().invoke_on(0, [pgid](auto &primary_mapping) { + auto iter = primary_mapping.pg_to_core.find(pgid); + ceph_assert_always(iter != primary_mapping.pg_to_core.end()); + ceph_assert_always(iter->second != NULL_CORE); + auto count_iter = primary_mapping.core_to_num_pgs.find(iter->second); + ceph_assert_always(count_iter != primary_mapping.core_to_num_pgs.end()); + ceph_assert_always(count_iter->second > 0); + --(count_iter->second); + primary_mapping.pg_to_core.erase(iter); + return primary_mapping.container().invoke_on_others( + [pgid](auto &other_mapping) { + auto iter = other_mapping.pg_to_core.find(pgid); + ceph_assert_always(iter != other_mapping.pg_to_core.end()); + ceph_assert_always(iter->second != NULL_CORE); + other_mapping.pg_to_core.erase(iter); + }); + }); } size_t get_num_pgs() const { return pg_to_core.size(); } diff --git a/ceph/src/crimson/osd/pg_shard_manager.cc b/ceph/src/crimson/osd/pg_shard_manager.cc index 03174b1df..6061c856b 100644 --- a/ceph/src/crimson/osd/pg_shard_manager.cc +++ b/ceph/src/crimson/osd/pg_shard_manager.cc @@ -12,39 +12,6 @@ namespace { namespace crimson::osd { -seastar::future<> PGShardManager::start( - const int whoami, - crimson::net::Messenger &cluster_msgr, - crimson::net::Messenger &public_msgr, - crimson::mon::Client &monc, - crimson::mgr::Client &mgrc, - crimson::os::FuturizedStore &store) -{ - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); - return osd_singleton_state.start_single( - whoami, std::ref(cluster_msgr), std::ref(public_msgr), - std::ref(monc), std::ref(mgrc) - ).then([this, whoami, &store] { - ceph::mono_time startup_time = ceph::mono_clock::now(); - return shard_services.start( - std::ref(osd_singleton_state), - whoami, - startup_time, - osd_singleton_state.local().perf, - osd_singleton_state.local().recoverystate_perf, - std::ref(store)); - }); -} - -seastar::future<> PGShardManager::stop() -{ - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); - return shard_services.stop( - ).then([this] { - return osd_singleton_state.stop(); - }); -} - seastar::future<> PGShardManager::load_pgs(crimson::os::FuturizedStore& store) { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); @@ -56,12 +23,12 @@ seastar::future<> PGShardManager::load_pgs(crimson::os::FuturizedStore& store) auto[coll, shard_core] = coll_core; spg_t 
pgid; if (coll.is_pg(&pgid)) { - auto core = get_osd_singleton_state( - ).pg_to_shard_mapping.maybe_create_pg( - pgid, shard_core); - return with_remote_shard_state( - core, - [pgid]( + return get_pg_to_shard_mapping().maybe_create_pg( + pgid, shard_core + ).then([this, pgid] (auto core) { + return this->template with_remote_shard_state( + core, + [pgid]( PerShardState &per_shard_state, ShardServices &shard_services) { return shard_services.load_pg( @@ -72,6 +39,7 @@ seastar::future<> PGShardManager::load_pgs(crimson::os::FuturizedStore& store) return seastar::now(); }); }); + }); } else if (coll.is_temp(&pgid)) { logger().warn( "found temp collection on crimson osd, should be impossible: {}", @@ -117,8 +85,13 @@ seastar::future<> PGShardManager::broadcast_map_to_pgs(epoch_t epoch) local_service, epoch ); }).then([this, epoch] { - get_osd_singleton_state().osdmap_gate.got_map(epoch); - return seastar::now(); + logger().debug("PGShardManager::broadcast_map_to_pgs " + "broadcasted up to {}", + epoch); + return shard_services.invoke_on_all([epoch](auto &local_service) { + local_service.local_state.osdmap_gate.got_map(epoch); + return seastar::now(); + }); }); } diff --git a/ceph/src/crimson/osd/pg_shard_manager.h b/ceph/src/crimson/osd/pg_shard_manager.h index fd99304ba..2f3a3015d 100644 --- a/ceph/src/crimson/osd/pg_shard_manager.h +++ b/ceph/src/crimson/osd/pg_shard_manager.h @@ -24,8 +24,9 @@ namespace crimson::osd { * etc) */ class PGShardManager { - seastar::sharded osd_singleton_state; - seastar::sharded shard_services; + seastar::sharded &osd_singleton_state; + seastar::sharded &shard_services; + seastar::sharded &pg_to_shard_mapping; #define FORWARD_CONST(FROM_METHOD, TO_METHOD, TARGET) \ template \ @@ -46,16 +47,13 @@ public: using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; - PGShardManager() = default; - - seastar::future<> start( - const int whoami, - crimson::net::Messenger &cluster_msgr, - crimson::net::Messenger &public_msgr, - crimson::mon::Client &monc, - crimson::mgr::Client &mgrc, - crimson::os::FuturizedStore &store); - seastar::future<> stop(); + PGShardManager( + seastar::sharded &osd_singleton_state, + seastar::sharded &shard_services, + seastar::sharded &pg_to_shard_mapping) + : osd_singleton_state(osd_singleton_state), + shard_services(shard_services), + pg_to_shard_mapping(pg_to_shard_mapping) {} auto &get_osd_singleton_state() { ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); @@ -66,15 +64,15 @@ public: return osd_singleton_state.local(); } auto &get_shard_services() { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.local(); } auto &get_shard_services() const { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.local(); } auto &get_local_state() { return get_shard_services().local_state; } auto &get_local_state() const { return get_shard_services().local_state; } + auto &get_pg_to_shard_mapping() { return pg_to_shard_mapping.local(); } + auto &get_pg_to_shard_mapping() const { return pg_to_shard_mapping.local(); } seastar::future<> update_map(local_cached_map_t &&map) { get_osd_singleton_state().update_map( @@ -109,22 +107,22 @@ public: FORWARD_TO_OSD_SINGLETON(send_pg_created) // osd state forwards - FORWARD(is_active, is_active, get_osd_singleton_state().osd_state) - FORWARD(is_preboot, is_preboot, get_osd_singleton_state().osd_state) - FORWARD(is_booting, is_booting, get_osd_singleton_state().osd_state) - FORWARD(is_stopping, 
is_stopping, get_osd_singleton_state().osd_state) - FORWARD(is_prestop, is_prestop, get_osd_singleton_state().osd_state) - FORWARD(is_initializing, is_initializing, get_osd_singleton_state().osd_state) - FORWARD(set_prestop, set_prestop, get_osd_singleton_state().osd_state) - FORWARD(set_preboot, set_preboot, get_osd_singleton_state().osd_state) - FORWARD(set_booting, set_booting, get_osd_singleton_state().osd_state) - FORWARD(set_stopping, set_stopping, get_osd_singleton_state().osd_state) - FORWARD(set_active, set_active, get_osd_singleton_state().osd_state) - FORWARD(when_active, when_active, get_osd_singleton_state().osd_state) - FORWARD_CONST(get_osd_state_string, to_string, get_osd_singleton_state().osd_state) - - FORWARD(got_map, got_map, get_osd_singleton_state().osdmap_gate) - FORWARD(wait_for_map, wait_for_map, get_osd_singleton_state().osdmap_gate) + FORWARD(is_active, is_active, get_shard_services().local_state.osd_state) + FORWARD(is_preboot, is_preboot, get_shard_services().local_state.osd_state) + FORWARD(is_booting, is_booting, get_shard_services().local_state.osd_state) + FORWARD(is_stopping, is_stopping, get_shard_services().local_state.osd_state) + FORWARD(is_prestop, is_prestop, get_shard_services().local_state.osd_state) + FORWARD(is_initializing, is_initializing, get_shard_services().local_state.osd_state) + FORWARD(set_prestop, set_prestop, get_shard_services().local_state.osd_state) + FORWARD(set_preboot, set_preboot, get_shard_services().local_state.osd_state) + FORWARD(set_booting, set_booting, get_shard_services().local_state.osd_state) + FORWARD(set_stopping, set_stopping, get_shard_services().local_state.osd_state) + FORWARD(set_active, set_active, get_shard_services().local_state.osd_state) + FORWARD(when_active, when_active, get_shard_services().local_state.osd_state) + FORWARD_CONST(get_osd_state_string, to_string, get_shard_services().local_state.osd_state) + + FORWARD(got_map, got_map, get_shard_services().local_state.osdmap_gate) + FORWARD(wait_for_map, wait_for_map, get_shard_services().local_state.osdmap_gate) // Metacoll FORWARD_TO_OSD_SINGLETON(init_meta_coll) @@ -142,7 +140,6 @@ public: template auto with_remote_shard_state(core_id_t core, F &&f) { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return shard_services.invoke_on( core, [f=std::move(f)](auto &target_shard_services) mutable { return std::invoke( @@ -156,7 +153,6 @@ public: core_id_t core, typename T::IRef &&op, F &&f) { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); if (seastar::this_shard_id() == core) { auto &target_shard_services = shard_services.local(); return std::invoke( @@ -188,20 +184,19 @@ public: typename T::IRef op ) { ceph_assert(op->use_count() == 1); - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); auto &logger = crimson::get_logger(ceph_subsys_osd); static_assert(T::can_create()); logger.debug("{}: can_create", *op); - auto core = get_osd_singleton_state().pg_to_shard_mapping.maybe_create_pg( - op->get_pgid()); - get_local_state().registry.remove_from_registry(*op); - return with_remote_shard_state_and_op( - core, std::move(op), - [](PerShardState &per_shard_state, - ShardServices &shard_services, - typename T::IRef op) { + return get_pg_to_shard_mapping().maybe_create_pg( + op->get_pgid() + ).then([this, op = std::move(op)](auto core) mutable { + return this->template with_remote_shard_state_and_op( + core, std::move(op), + [](PerShardState &per_shard_state, + ShardServices &shard_services, + typename T::IRef op) { 
per_shard_state.registry.add_to_registry(*op); auto &logger = crimson::get_logger(ceph_subsys_osd); auto &opref = *op; @@ -211,7 +206,7 @@ public: auto &&trigger) { return shard_services.get_or_create_pg( std::move(trigger), - opref.get_pgid(), opref.get_epoch(), + opref.get_pgid(), std::move(opref.get_create_info()) ); }).safe_then([&logger, &shard_services, &opref](Ref pgref) { @@ -224,6 +219,7 @@ public: }) ).then([op=std::move(op)] {}); }); + }); } /// Runs opref on the appropriate core, waiting for pg as necessary @@ -232,20 +228,19 @@ public: typename T::IRef op ) { ceph_assert(op->use_count() == 1); - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); auto &logger = crimson::get_logger(ceph_subsys_osd); static_assert(!T::can_create()); logger.debug("{}: !can_create", *op); - auto core = get_osd_singleton_state().pg_to_shard_mapping.maybe_create_pg( - op->get_pgid()); - get_local_state().registry.remove_from_registry(*op); - return with_remote_shard_state_and_op( - core, std::move(op), - [](PerShardState &per_shard_state, - ShardServices &shard_services, - typename T::IRef op) { + return get_pg_to_shard_mapping().maybe_create_pg( + op->get_pgid() + ).then([this, op = std::move(op)](auto core) mutable { + return this->template with_remote_shard_state_and_op( + core, std::move(op), + [](PerShardState &per_shard_state, + ShardServices &shard_services, + typename T::IRef op) { per_shard_state.registry.add_to_registry(*op); auto &logger = crimson::get_logger(ceph_subsys_osd); auto &opref = *op; @@ -265,6 +260,7 @@ public: }) ).then([op=std::move(op)] {}); }); + }); } seastar::future<> load_pgs(crimson::os::FuturizedStore& store); @@ -313,20 +309,19 @@ public: */ template void for_each_pgid(F &&f) const { - return get_osd_singleton_state().pg_to_shard_mapping.for_each_pgid( + return get_pg_to_shard_mapping().for_each_pgid( std::forward(f)); } auto get_num_pgs() const { - return get_osd_singleton_state().pg_to_shard_mapping.get_num_pgs(); + return get_pg_to_shard_mapping().get_num_pgs(); } seastar::future<> broadcast_map_to_pgs(epoch_t epoch); template auto with_pg(spg_t pgid, F &&f) { - core_id_t core = get_osd_singleton_state( - ).pg_to_shard_mapping.get_pg_mapping(pgid); + core_id_t core = get_pg_to_shard_mapping().get_pg_mapping(pgid); return with_remote_shard_state( core, [pgid, f=std::move(f)](auto &local_state, auto &local_service) mutable { @@ -338,7 +333,6 @@ public: template auto start_pg_operation(Args&&... 
args) { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); auto op = get_local_state().registry.create_operation( std::forward(args)...); auto &logger = crimson::get_logger(ceph_subsys_osd); @@ -352,35 +346,30 @@ public: auto fut = opref.template enter_stage<>( opref.get_connection_pipeline().await_active ).then([this, &opref, &logger] { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); logger.debug("{}: start_pg_operation in await_active stage", opref); - return get_osd_singleton_state().osd_state.when_active(); + return get_shard_services().local_state.osd_state.when_active(); }).then([&logger, &opref] { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); logger.debug("{}: start_pg_operation active, entering await_map", opref); return opref.template enter_stage<>( opref.get_connection_pipeline().await_map); }).then([this, &logger, &opref] { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); logger.debug("{}: start_pg_operation await_map stage", opref); using OSDMapBlockingEvent = OSD_OSDMapGate::OSDMapBlocker::BlockingEvent; return opref.template with_blocking_event( [this, &opref](auto &&trigger) { std::ignore = this; - return get_osd_singleton_state().osdmap_gate.wait_for_map( - std::move(trigger), - opref.get_epoch(), - &get_shard_services()); - }); + return get_shard_services().local_state.osdmap_gate.wait_for_map( + std::move(trigger), + opref.get_epoch(), + &get_shard_services()); + }); }).then([&logger, &opref](auto epoch) { - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); logger.debug("{}: got map {}, entering get_pg", opref, epoch); return opref.template enter_stage<>( opref.get_connection_pipeline().get_pg); }).then([this, &logger, &opref, op=std::move(op)]() mutable { logger.debug("{}: in get_pg core {}", opref, seastar::this_shard_id()); - ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); logger.debug("{}: in get_pg", opref); if constexpr (T::can_create()) { logger.debug("{}: can_create", opref); diff --git a/ceph/src/crimson/osd/shard_services.cc b/ceph/src/crimson/osd/shard_services.cc index 647d8d6be..a6431305d 100644 --- a/ceph/src/crimson/osd/shard_services.cc +++ b/ceph/src/crimson/osd/shard_services.cc @@ -38,9 +38,12 @@ PerShardState::PerShardState( ceph::mono_time startup_time, PerfCounters *perf, PerfCounters *recoverystate_perf, - crimson::os::FuturizedStore &store) + crimson::os::FuturizedStore &store, + OSDState &osd_state) : whoami(whoami), store(store.get_sharded_store()), + osd_state(osd_state), + osdmap_gate("PerShardState::osdmap_gate"), perf(perf), recoverystate_perf(recoverystate_perf), throttler(crimson::common::local_conf()), next_tid( @@ -121,7 +124,6 @@ OSDSingletonState::OSDSingletonState( crimson::mon::Client &monc, crimson::mgr::Client &mgrc) : whoami(whoami), - osdmap_gate("OSDSingletonState::osdmap_gate"), cluster_msgr(cluster_msgr), public_msgr(public_msgr), monc(monc), @@ -349,8 +351,10 @@ OSDSingletonState::get_local_map(epoch_t e) { // TODO: use LRU cache for managing osdmap, fallback to disk if we have to if (auto found = osdmaps.find(e); found) { + logger().debug("{} osdmap.{} found in cache", __func__, e); return seastar::make_ready_future(std::move(found)); } else { + logger().debug("{} loading osdmap.{} from disk", __func__, e); return load_map(e).then([e, this](std::unique_ptr osdmap) { return seastar::make_ready_future( osdmaps.insert(e, std::move(osdmap))); @@ -370,8 +374,10 @@ seastar::future OSDSingletonState::load_map_bl( epoch_t e) { if (std::optional found = map_bl_cache.find(e); found) { + 
logger().debug("{} osdmap.{} found in cache", __func__, e); return seastar::make_ready_future(*found); } else { + logger().debug("{} loading osdmap.{} from disk", __func__, e); return meta_coll->load_map(e); } } @@ -401,14 +407,14 @@ seastar::future> OSDSingletonState::load_map_bls( seastar::future> OSDSingletonState::load_map(epoch_t e) { auto o = std::make_unique(); - if (e > 0) { - return load_map_bl(e).then([o=std::move(o)](bufferlist bl) mutable { - o->decode(bl); - return seastar::make_ready_future>(std::move(o)); - }); - } else { + logger().info("{} osdmap.{}", __func__, e); + if (e == 0) { return seastar::make_ready_future>(std::move(o)); } + return load_map_bl(e).then([o=std::move(o)](bufferlist bl) mutable { + o->decode(bl); + return seastar::make_ready_future>(std::move(o)); + }); } seastar::future<> OSDSingletonState::store_maps(ceph::os::Transaction& t, @@ -421,12 +427,15 @@ seastar::future<> OSDSingletonState::store_maps(ceph::os::Transaction& t, if (auto p = m->maps.find(e); p != m->maps.end()) { auto o = std::make_unique(); o->decode(p->second); - logger().info("store_maps osdmap.{}", e); + logger().info("store_maps storing osdmap.{}", e); store_map_bl(t, e, std::move(std::move(p->second))); osdmaps.insert(e, std::move(o)); return seastar::now(); } else if (auto p = m->incremental_maps.find(e); p != m->incremental_maps.end()) { + logger().info("store_maps found osdmap.{} incremental map, " + "loading osdmap.{}", e, e - 1); + ceph_assert(std::cmp_greater(e, 0u)); return load_map(e - 1).then([e, bl=p->second, &t, this](auto o) { OSDMap::Incremental inc; auto i = bl.cbegin(); @@ -434,6 +443,7 @@ seastar::future<> OSDSingletonState::store_maps(ceph::os::Transaction& t, o->apply_incremental(inc); bufferlist fbl; o->encode(fbl, inc.encode_features | CEPH_FEATURE_RESERVED); + logger().info("store_maps storing osdmap.{}", o->get_epoch()); store_map_bl(t, e, std::move(fbl)); osdmaps.insert(e, std::move(o)); return seastar::now(); @@ -603,7 +613,6 @@ ShardServices::get_or_create_pg_ret ShardServices::get_or_create_pg( PGMap::PGCreationBlockingEvent::TriggerI&& trigger, spg_t pgid, - epoch_t epoch, std::unique_ptr info) { if (info) { @@ -700,6 +709,9 @@ seastar::future<> OSDSingletonState::send_incremental_map( crimson::net::Connection &conn, epoch_t first) { + logger().info("{}: first osdmap: {} " + "superblock's oldest map: {}", + __func__, first, superblock.oldest_map); if (first >= superblock.oldest_map) { return load_map_bls( first, superblock.newest_map diff --git a/ceph/src/crimson/osd/shard_services.h b/ceph/src/crimson/osd/shard_services.h index 12bb23ac2..9b7553e7b 100644 --- a/ceph/src/crimson/osd/shard_services.h +++ b/ceph/src/crimson/osd/shard_services.h @@ -45,9 +45,6 @@ class BufferedRecoveryMessages; namespace crimson::osd { -// seastar::sharded puts start_single on core 0 -constexpr core_id_t PRIMARY_CORE = 0; - class PGShardManager; /** @@ -58,6 +55,7 @@ class PGShardManager; class PerShardState { friend class ShardServices; friend class PGShardManager; + friend class OSD; using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; @@ -68,6 +66,9 @@ class PerShardState { crimson::os::FuturizedStore::Shard &store; crimson::common::CephContext cct; + OSDState &osd_state; + OSD_OSDMapGate osdmap_gate; + PerfCounters *perf = nullptr; PerfCounters *recoverystate_perf = nullptr; @@ -188,7 +189,8 @@ public: ceph::mono_time startup_time, PerfCounters *perf, PerfCounters *recoverystate_perf, - crimson::os::FuturizedStore 
&store); + crimson::os::FuturizedStore &store, + OSDState& osd_state); }; /** @@ -200,6 +202,7 @@ public: class OSDSingletonState : public md_config_obs_t { friend class ShardServices; friend class PGShardManager; + friend class OSD; using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; @@ -218,8 +221,6 @@ private: PerfCounters *perf = nullptr; PerfCounters *recoverystate_perf = nullptr; - OSDState osd_state; - SharedLRU osdmaps; SimpleLRU map_bl_cache; @@ -228,7 +229,6 @@ private: void update_map(cached_map_t new_osdmap) { osdmap = std::move(new_osdmap); } - OSD_OSDMapGate osdmap_gate; crimson::net::Messenger &cluster_msgr; crimson::net::Messenger &public_msgr; @@ -280,9 +280,6 @@ private: void requeue_pg_temp(); seastar::future<> send_pg_temp(); - // TODO: add config to control mapping - PGShardMapping pg_to_shard_mapping{0, seastar::smp::count}; - std::set pg_created; seastar::future<> send_pg_created(pg_t pgid); seastar::future<> send_pg_created(); @@ -321,11 +318,13 @@ private: */ class ShardServices : public OSDMapService { friend class PGShardManager; + friend class OSD; using cached_map_t = OSDMapService::cached_map_t; using local_cached_map_t = OSDMapService::local_cached_map_t; PerShardState local_state; seastar::sharded &osd_singleton_state; + PGShardMapping& pg_to_shard_mapping; template auto with_singleton(F &&f, Args&&... args) { @@ -368,9 +367,11 @@ public: template ShardServices( seastar::sharded &osd_singleton_state, + PGShardMapping& pg_to_shard_mapping, PSSArgs&&... args) : local_state(std::forward(args)...), - osd_singleton_state(osd_singleton_state) {} + osd_singleton_state(osd_singleton_state), + pg_to_shard_mapping(pg_to_shard_mapping) {} FORWARD_TO_OSD_SINGLETON(send_to_osd) @@ -380,10 +381,7 @@ public: auto remove_pg(spg_t pgid) { local_state.pg_map.remove_pg(pgid); - return with_singleton( - [pgid](auto &osstate) { - osstate.pg_to_shard_mapping.remove_pg(pgid); - }); + return pg_to_shard_mapping.remove_pg(pgid); } crimson::common::CephContext *get_cct() { @@ -427,7 +425,6 @@ public: get_or_create_pg_ret get_or_create_pg( PGMap::PGCreationBlockingEvent::TriggerI&&, spg_t pgid, - epoch_t epoch, std::unique_ptr info); using wait_for_pg_ertr = PGMap::wait_for_pg_ertr; diff --git a/ceph/src/crimson/osd/state.h b/ceph/src/crimson/osd/state.h index 7413e58fa..f0676a4ec 100644 --- a/ceph/src/crimson/osd/state.h +++ b/ceph/src/crimson/osd/state.h @@ -10,7 +10,22 @@ class OSDMap; -class OSDState { +namespace crimson::osd { + +// seastar::sharded puts start_single on core 0 +constexpr core_id_t PRIMARY_CORE = 0; + +/** + * OSDState + * + * Maintains state representing the OSD's progress from booting through + * shutdown. + * + * Shards other than PRIMARY_CORE may use their local instance to check + * on ACTIVE and STOPPING. All other methods are restricted to + * PRIMARY_CORE (such methods start with an assert to this effect). 
+ */ +class OSDState : public seastar::peering_sharded_service { enum class State { INITIALIZING, @@ -25,14 +40,29 @@ class OSDState { State state = State::INITIALIZING; mutable seastar::shared_promise<> wait_for_active; + /// Sets local instance state to active, called from set_active + void _set_active() { + state = State::ACTIVE; + wait_for_active.set_value(); + wait_for_active = {}; + } + /// Sets local instance state to stopping, called from set_stopping + void _set_stopping() { + state = State::STOPPING; + wait_for_active.set_exception(crimson::common::system_shutdown_exception{}); + wait_for_active = {}; + } public: bool is_initializing() const { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::INITIALIZING; } bool is_preboot() const { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::PREBOOT; } bool is_booting() const { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::BOOTING; } bool is_active() const { @@ -43,32 +73,41 @@ public: : wait_for_active.get_shared_future(); }; bool is_prestop() const { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::PRESTOP; } bool is_stopping() const { return state == State::STOPPING; } bool is_waiting_for_healthy() const { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); return state == State::WAITING_FOR_HEALTHY; } void set_preboot() { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); state = State::PREBOOT; } void set_booting() { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); state = State::BOOTING; } - void set_active() { - state = State::ACTIVE; - wait_for_active.set_value(); - wait_for_active = {}; + /// Sets all shards to active + seastar::future<> set_active() { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); + return container().invoke_on_all([](auto& osd_state) { + osd_state._set_active(); + }); } void set_prestop() { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); state = State::PRESTOP; } - void set_stopping() { - state = State::STOPPING; - wait_for_active.set_exception(crimson::common::system_shutdown_exception{}); - wait_for_active = {}; + /// Sets all shards to stopping + seastar::future<> set_stopping() { + ceph_assert(seastar::this_shard_id() == PRIMARY_CORE); + return container().invoke_on_all([](auto& osd_state) { + osd_state._set_stopping(); + }); } std::string_view to_string() const { switch (state) { @@ -88,3 +127,4 @@ inline std::ostream& operator<<(std::ostream& os, const OSDState& s) { return os << s.to_string(); } +} diff --git a/ceph/src/crimson/osd/watch.cc b/ceph/src/crimson/osd/watch.cc index f71d915bb..4573333c3 100644 --- a/ceph/src/crimson/osd/watch.cc +++ b/ceph/src/crimson/osd/watch.cc @@ -111,7 +111,9 @@ seastar::future<> Watch::send_notify_msg(NotifyRef notify) seastar::future<> Watch::start_notify(NotifyRef notify) { - logger().info("{} adding notify(id={})", __func__, notify->ninfo.notify_id); + logger().debug("{} gid={} cookie={} starting notify(id={})", + __func__, get_watcher_gid(), get_cookie(), + notify->ninfo.notify_id); auto [ it, emplaced ] = in_progress_notifies.emplace(std::move(notify)); ceph_assert(emplaced); ceph_assert(is_alive()); @@ -122,15 +124,24 @@ seastar::future<> Watch::notify_ack( const uint64_t notify_id, const ceph::bufferlist& reply_bl) { - logger().info("{}", __func__); - return seastar::do_for_each(in_progress_notifies, - [this_shared=shared_from_this(), reply_bl] (auto notify) { - return 
notify->complete_watcher(this_shared, reply_bl); - } - ).then([this] { - in_progress_notifies.clear(); + logger().debug("{} gid={} cookie={} notify_id={}", + __func__, get_watcher_gid(), get_cookie(), notify_id); + const auto it = in_progress_notifies.find(notify_id); + if (it == std::end(in_progress_notifies)) { + logger().error("{} notify_id={} not found on the in-progess list." + " Supressing but this should not happen.", + __func__, notify_id); return seastar::now(); - }); + } + auto notify = *it; + logger().debug("Watch::notify_ack gid={} cookie={} found notify(id={})", + get_watcher_gid(), + get_cookie(), + notify->get_id()); + // let's ensure we're extending the life-time till end of this method + static_assert(std::is_same_v); + in_progress_notifies.erase(it); + return notify->complete_watcher(shared_from_this(), reply_bl); } seastar::future<> Watch::send_disconnect_msg() @@ -149,6 +160,7 @@ seastar::future<> Watch::send_disconnect_msg() void Watch::discard_state() { + logger().debug("{} gid={} cookie={}", __func__, get_watcher_gid(), get_cookie()); ceph_assert(obc); in_progress_notifies.clear(); timeout_timer.cancel(); @@ -165,7 +177,7 @@ void Watch::got_ping(utime_t) seastar::future<> Watch::remove() { - logger().info("{}", __func__); + logger().debug("{} gid={} cookie={}", __func__, get_watcher_gid(), get_cookie()); // in contrast to ceph-osd crimson sends CEPH_WATCH_EVENT_DISCONNECT directly // from the timeout handler and _after_ CEPH_WATCH_EVENT_NOTIFY_COMPLETE. // this simplifies the Watch::remove() interface as callers aren't obliged @@ -173,6 +185,10 @@ seastar::future<> Watch::remove() // becomes an implementation detail of Watch. return seastar::do_for_each(in_progress_notifies, [this_shared=shared_from_this()] (auto notify) { + logger().debug("Watch::remove gid={} cookie={} notify(id={})", + this_shared->get_watcher_gid(), + this_shared->get_cookie(), + notify->ninfo.notify_id); return notify->remove_watcher(this_shared); }).then([this] { discard_state(); @@ -182,7 +198,9 @@ seastar::future<> Watch::remove() void Watch::cancel_notify(const uint64_t notify_id) { - logger().info("{} notify_id={}", __func__, notify_id); + logger().debug("{} gid={} cookie={} notify(id={})", + __func__, get_watcher_gid(), get_cookie(), + notify_id); const auto it = in_progress_notifies.find(notify_id); assert(it != std::end(in_progress_notifies)); in_progress_notifies.erase(it); @@ -213,8 +231,7 @@ bool notify_reply_t::operator<(const notify_reply_t& rhs) const std::ostream &operator<<(std::ostream &out, const notify_reply_t &rhs) { out << "notify_reply_t{watcher_gid=" << rhs.watcher_gid - << ", watcher_cookie=" << rhs.watcher_cookie - << ", bl=" << rhs.bl << "}"; + << ", watcher_cookie=" << rhs.watcher_cookie << "}"; return out; } @@ -228,9 +245,19 @@ Notify::Notify(crimson::net::ConnectionRef conn, user_version(user_version) {} +Notify::~Notify() +{ + logger().debug("{} for notify(id={})", __func__, ninfo.notify_id); +} + seastar::future<> Notify::remove_watcher(WatchRef watch) { + logger().debug("{} for notify(id={})", __func__, ninfo.notify_id); + if (discarded || complete) { + logger().debug("{} for notify(id={}) discarded/complete already" + " discarded: {} complete: {}", __func__, + ninfo.notify_id, discarded ,complete); return seastar::now(); } [[maybe_unused]] const auto num_removed = watchers.erase(watch); @@ -250,7 +277,12 @@ seastar::future<> Notify::complete_watcher( WatchRef watch, const ceph::bufferlist& reply_bl) { + logger().debug("{} for notify(id={})", __func__, 
ninfo.notify_id); + if (discarded || complete) { + logger().debug("{} for notify(id={}) discarded/complete already" + " discarded: {} complete: {}", __func__, + ninfo.notify_id, discarded ,complete); return seastar::now(); } notify_replies.emplace(notify_reply_t{ diff --git a/ceph/src/crimson/osd/watch.h b/ceph/src/crimson/osd/watch.h index 0f7c9df54..b3982141d 100644 --- a/ceph/src/crimson/osd/watch.h +++ b/ceph/src/crimson/osd/watch.h @@ -140,6 +140,8 @@ class Notify : public seastar::enable_shared_from_this { [this] { do_notify_timeout(); } }; + ~Notify(); + /// (gid,cookie) -> reply_bl for everyone who acked the notify std::multiset notify_replies; diff --git a/ceph/src/crimson/tools/CMakeLists.txt b/ceph/src/crimson/tools/CMakeLists.txt index d57c3f9cf..fc18ff90b 100644 --- a/ceph/src/crimson/tools/CMakeLists.txt +++ b/ceph/src/crimson/tools/CMakeLists.txt @@ -15,4 +15,8 @@ add_executable(perf-async-msgr perf_async_msgr.cc) target_link_libraries(perf-async-msgr ceph-common global ${ALLOC_LIBS}) add_executable(perf-staged-fltree perf_staged_fltree.cc) +if(WITH_TESTS) +target_link_libraries(perf-staged-fltree crimson-seastore crimson::gtest) +else() target_link_libraries(perf-staged-fltree crimson-seastore) +endif() diff --git a/ceph/src/crimson/tools/perf_async_msgr.cc b/ceph/src/crimson/tools/perf_async_msgr.cc index de3367a6e..38cc84fbb 100644 --- a/ceph/src/crimson/tools/perf_async_msgr.cc +++ b/ceph/src/crimson/tools/perf_async_msgr.cc @@ -94,7 +94,9 @@ int main(int argc, char** argv) ("bs", po::value()->default_value(0), "server block size") ("crc-enabled", po::value()->default_value(false), - "enable CRC checks"); + "enable CRC checks") + ("threads", po::value()->default_value(3), + "async messenger worker threads"); po::variables_map vm; std::vector unrecognized_options; try { @@ -120,6 +122,7 @@ int main(int argc, char** argv) ceph_assert_always(target_addr.is_msgr2()); auto bs = vm["bs"].as(); auto crc_enabled = vm["crc-enabled"].as(); + auto worker_threads = vm["threads"].as(); std::vector args(argv, argv + argc); auto cct = global_init(nullptr, args, @@ -136,5 +139,13 @@ int main(int argc, char** argv) cct->_conf.set_val("ms_crc_data", "false"); } + cct->_conf.set_val("ms_async_op_threads", fmt::format("{}", worker_threads)); + + std::cout << "server[" << addr + << "](bs=" << bs + << ", crc_enabled=" << crc_enabled + << ", worker_threads=" << worker_threads + << std::endl; + run(cct.get(), target_addr, bs); } diff --git a/ceph/src/crimson/tools/perf_crimson_msgr.cc b/ceph/src/crimson/tools/perf_crimson_msgr.cc index ef5602b0f..aa5753442 100644 --- a/ceph/src/crimson/tools/perf_crimson_msgr.cc +++ b/ceph/src/crimson/tools/perf_crimson_msgr.cc @@ -2,19 +2,22 @@ // vim: ts=8 sw=2 smarttab #include -#include #include +#include #include #include #include +#include #include #include #include #include +#include #include "common/ceph_time.h" #include "messages/MOSDOp.h" +#include "include/random.h" #include "crimson/auth/DummyAuth.h" #include "crimson/common/log.h" @@ -22,10 +25,13 @@ #include "crimson/net/Connection.h" #include "crimson/net/Dispatcher.h" #include "crimson/net/Messenger.h" +#include "crimson/osd/stop_signal.h" using namespace std; using namespace std::chrono_literals; +using lowres_clock_t = seastar::lowres_system_clock; + namespace bpo = boost::program_options; namespace { @@ -54,6 +60,19 @@ seastar::future create_sharded(Args... 
args) { }); } +double get_reactor_utilization() { + auto &value_map = seastar::metrics::impl::get_value_map(); + auto found = value_map.find("reactor_utilization"); + assert(found != value_map.end()); + auto &[full_name, metric_family] = *found; + std::ignore = full_name; + assert(metric_family.size() == 1); + const auto& [labels, metric] = *metric_family.begin(); + std::ignore = labels; + auto value = (*metric)(); + return value.ui(); +} + enum class perf_mode_t { both, client, @@ -65,8 +84,10 @@ struct client_config { unsigned block_size; unsigned ramptime; unsigned msgtime; - unsigned jobs; + unsigned num_clients; + unsigned num_conns; unsigned depth; + bool skip_core_0; std::string str() const { std::ostringstream out; @@ -74,8 +95,10 @@ struct client_config { << "](bs=" << block_size << ", ramptime=" << ramptime << ", msgtime=" << msgtime - << ", jobs=" << jobs + << ", num_clients=" << num_clients + << ", num_conns=" << num_conns << ", depth=" << depth + << ", skip_core_0=" << skip_core_0 << ")"; return out.str(); } @@ -83,16 +106,19 @@ struct client_config { static client_config load(bpo::variables_map& options) { client_config conf; entity_addr_t addr; - ceph_assert(addr.parse(options["addr"].as().c_str(), nullptr)); + ceph_assert(addr.parse(options["server-addr"].as().c_str(), nullptr)); ceph_assert_always(addr.is_msgr2()); conf.server_addr = addr; - conf.block_size = options["cbs"].as(); + conf.block_size = options["client-bs"].as(); conf.ramptime = options["ramptime"].as(); conf.msgtime = options["msgtime"].as(); - conf.jobs = options["jobs"].as(); + conf.num_clients = options["clients"].as(); + ceph_assert_always(conf.num_clients > 0); + conf.num_conns = options["conns-per-client"].as(); + ceph_assert_always(conf.num_conns > 0); conf.depth = options["depth"].as(); - ceph_assert(conf.depth % conf.jobs == 0); + conf.skip_core_0 = options["client-skip-core-0"].as(); return conf; } }; @@ -100,12 +126,14 @@ struct client_config { struct server_config { entity_addr_t addr; unsigned block_size; + bool is_fixed_cpu; unsigned core; std::string str() const { std::ostringstream out; out << "server[" << addr << "](bs=" << block_size + << ", is_fixed_cpu=" << is_fixed_cpu << ", core=" << core << ")"; return out.str(); @@ -114,17 +142,18 @@ struct server_config { static server_config load(bpo::variables_map& options) { server_config conf; entity_addr_t addr; - ceph_assert(addr.parse(options["addr"].as().c_str(), nullptr)); + ceph_assert(addr.parse(options["server-addr"].as().c_str(), nullptr)); ceph_assert_always(addr.is_msgr2()); conf.addr = addr; - conf.block_size = options["sbs"].as(); - conf.core = options["core"].as(); + conf.block_size = options["server-bs"].as(); + conf.is_fixed_cpu = options["server-fixed-cpu"].as(); + conf.core = options["server-core"].as(); return conf; } }; -const unsigned SAMPLE_RATE = 7; +const unsigned SAMPLE_RATE = 256; static seastar::future<> run( perf_mode_t mode, @@ -133,30 +162,68 @@ static seastar::future<> run( bool crc_enabled) { struct test_state { - struct Server; - using ServerFRef = seastar::foreign_ptr>; - struct Server final - : public crimson::net::Dispatcher { + : public crimson::net::Dispatcher, + public seastar::peering_sharded_service { + // available only in msgr_sid crimson::net::MessengerRef msgr; crimson::auth::DummyAuthClientServer dummy_auth; const seastar::shard_id msgr_sid; std::string lname; + + bool is_fixed_cpu = true; + bool is_stopped = false; + std::optional> fut_report; + + unsigned conn_count = 0; + unsigned msg_count = 0; + 
MessageRef last_msg; + + // available in all shards unsigned msg_len; bufferlist msg_data; - Server(unsigned msg_len) - : msgr_sid{seastar::this_shard_id()}, + Server(seastar::shard_id msgr_sid, unsigned msg_len, bool needs_report) + : msgr_sid{msgr_sid}, msg_len{msg_len} { - lname = "server#"; - lname += std::to_string(msgr_sid); + lname = fmt::format("server@{}", msgr_sid); msg_data.append_zero(msg_len); + + if (seastar::this_shard_id() == msgr_sid && + needs_report) { + start_report(); + } + } + + void ms_handle_connect( + crimson::net::ConnectionRef, + seastar::shard_id) override { + ceph_abort("impossible, server won't connect"); + } + + void ms_handle_accept( + crimson::net::ConnectionRef, + seastar::shard_id new_shard, + bool is_replace) override { + ceph_assert_always(new_shard == seastar::this_shard_id()); + auto &server = container().local(); + ++server.conn_count; + } + + void ms_handle_reset( + crimson::net::ConnectionRef, + bool) override { + auto &server = container().local(); + --server.conn_count; } std::optional> ms_dispatch( crimson::net::ConnectionRef c, MessageRef m) override { + assert(c->get_shard_id() == seastar::this_shard_id()); ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); + auto &server = container().local(); + // server replies with MOSDOp to generate server-side write workload const static pg_t pgid; const static object_locator_t oloc; @@ -164,22 +231,32 @@ static seastar::future<> run( pgid.pool(), oloc.nspace); static spg_t spgid(pgid); auto rep = crimson::make_message(0, 0, hobj, spgid, 0, 0, 0); - bufferlist data(msg_data); - rep->write(0, msg_len, data); + bufferlist data(server.msg_data); + rep->write(0, server.msg_len, data); rep->set_tid(m->get_tid()); + ++server.msg_count; std::ignore = c->send(std::move(rep)); + + if (server.msg_count % 16 == 0) { + server.last_msg = std::move(m); + } return {seastar::now()}; } - seastar::future<> init(const entity_addr_t& addr) { - return seastar::smp::submit_to(msgr_sid, [addr, this] { + seastar::future<> init(const entity_addr_t& addr, bool is_fixed_cpu) { + return container().invoke_on( + msgr_sid, [addr, is_fixed_cpu](auto &server) { // server msgr is always with nonce 0 - msgr = crimson::net::Messenger::create(entity_name_t::OSD(msgr_sid), lname, 0); - msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0)); - msgr->set_auth_client(&dummy_auth); - msgr->set_auth_server(&dummy_auth); - return msgr->bind(entity_addrvec_t{addr}).safe_then([this] { - return msgr->start({this}); + server.msgr = crimson::net::Messenger::create( + entity_name_t::OSD(server.msgr_sid), + server.lname, 0, is_fixed_cpu); + server.msgr->set_default_policy(crimson::net::SocketPolicy::stateless_server(0)); + server.msgr->set_auth_client(&server.dummy_auth); + server.msgr->set_auth_server(&server.dummy_auth); + server.is_fixed_cpu = is_fixed_cpu; + return server.msgr->bind(entity_addrvec_t{addr} + ).safe_then([&server] { + return server.msgr->start({&server}); }, crimson::net::Messenger::bind_ertr::all_same_way( [addr] (const std::error_code& e) { logger().error("Server: " @@ -188,25 +265,161 @@ static seastar::future<> run( })); }); } + seastar::future<> shutdown() { logger().info("{} shutdown...", lname); - return seastar::smp::submit_to(msgr_sid, [this] { - ceph_assert(msgr); - msgr->stop(); - return msgr->shutdown(); + return container().invoke_on( + msgr_sid, [](auto &server) { + server.is_stopped = true; + ceph_assert(server.msgr); + server.msgr->stop(); + return server.msgr->shutdown( + ).then([&server] { + if 
(server.fut_report.has_value()) { + return std::move(server.fut_report.value()); + } else { + return seastar::now(); + } + }); }); } - seastar::future<> wait() { - return seastar::smp::submit_to(msgr_sid, [this] { - ceph_assert(msgr); - return msgr->wait(); - }); + + private: + struct ShardReport { + unsigned msg_count = 0; + + // per-interval metrics + double reactor_utilization; + unsigned conn_count = 0; + int msg_size = 0; + unsigned msg_count_interval = 0; + }; + + // should not be called frequently to impact performance + void get_report(ShardReport& last) { + unsigned last_msg_count = last.msg_count; + int msg_size = -1; + if (last_msg) { + auto msg = boost::static_pointer_cast(last_msg); + msg->finish_decode(); + ceph_assert_always(msg->ops.size() == 1); + msg_size = msg->ops[0].op.extent.length; + last_msg.reset(); + } + + last.msg_count = msg_count; + last.reactor_utilization = get_reactor_utilization(); + last.conn_count = conn_count; + last.msg_size = msg_size; + last.msg_count_interval = msg_count - last_msg_count; } - static seastar::future create(seastar::shard_id msgr_sid, unsigned msg_len) { - return seastar::smp::submit_to(msgr_sid, [msg_len] { - return seastar::make_foreign(std::make_unique(msg_len)); - }); + struct TimerReport { + unsigned elapsed = 0u; + mono_time start_time = mono_clock::zero(); + std::vector reports; + + TimerReport(unsigned shards) : reports(shards) {} + }; + + void start_report() { + seastar::promise<> pr_report; + fut_report = pr_report.get_future(); + seastar::do_with( + TimerReport(seastar::smp::count), + [this](auto &report) { + return seastar::do_until( + [this] { return is_stopped; }, + [&report, this] { + return seastar::sleep(2s + ).then([&report, this] { + report.elapsed += 2; + if (is_fixed_cpu) { + return seastar::smp::submit_to(msgr_sid, + [&report, this] { + auto &server = container().local(); + server.get_report(report.reports[seastar::this_shard_id()]); + }).then([&report, this] { + auto now = mono_clock::now(); + auto prv = report.start_time; + report.start_time = now; + if (prv == mono_clock::zero()) { + // cannot compute duration + return; + } + std::chrono::duration duration_d = now - prv; + double duration = duration_d.count(); + auto &ireport = report.reports[msgr_sid]; + double iops = ireport.msg_count_interval / duration; + double throughput_MB = -1; + if (ireport.msg_size >= 0) { + throughput_MB = iops * ireport.msg_size / 1048576; + } + std::ostringstream sout; + sout << setfill(' ') + << report.elapsed + << "(" << std::setw(5) << duration << ") " + << std::setw(9) << iops << "IOPS " + << std::setw(8) << throughput_MB << "MiB/s " + << ireport.reactor_utilization + << "(" << ireport.conn_count << ")"; + std::cout << sout.str() << std::endl; + }); + } else { + return seastar::smp::invoke_on_all([&report, this] { + auto &server = container().local(); + server.get_report(report.reports[seastar::this_shard_id()]); + }).then([&report, this] { + auto now = mono_clock::now(); + auto prv = report.start_time; + report.start_time = now; + if (prv == mono_clock::zero()) { + // cannot compute duration + return; + } + std::chrono::duration duration_d = now - prv; + double duration = duration_d.count(); + unsigned num_msgs = 0; + // -1 means unavailable, -2 means mismatch + int msg_size = -1; + for (auto &i : report.reports) { + if (i.msg_size >= 0) { + if (msg_size == -2) { + // pass + } else if (msg_size == -1) { + msg_size = i.msg_size; + } else { + if (msg_size != i.msg_size) { + msg_size = -2; + } + } + } + num_msgs += 
i.msg_count_interval; + } + double iops = num_msgs / duration; + double throughput_MB = msg_size; + if (msg_size >= 0) { + throughput_MB = iops * msg_size / 1048576; + } + std::ostringstream sout; + sout << setfill(' ') + << report.elapsed + << "(" << std::setw(5) << duration << ") " + << std::setw(9) << iops << "IOPS " + << std::setw(8) << throughput_MB << "MiB/s "; + for (auto &i : report.reports) { + sout << i.reactor_utilization + << "(" << i.conn_count << ") "; + } + std::cout << sout.str() << std::endl; + }); + } + }); + } + ); + }).then([this] { + logger().info("report is stopped!"); + }).forward_to(std::move(pr_report)); } }; @@ -223,106 +436,212 @@ static seastar::future<> run( unsigned start_count = 0u; unsigned sampled_count = 0u; - double total_lat_s = 0.0; + double sampled_total_lat_s = 0.0; // for reporting only mono_time finish_time = mono_clock::zero(); - void start() { + void start_connecting() { + connecting_time = mono_clock::now(); + } + + void finish_connecting() { + ceph_assert_always(connected_time == mono_clock::zero()); + connected_time = mono_clock::now(); + } + + void start_collect() { + ceph_assert_always(connected_time != mono_clock::zero()); start_time = mono_clock::now(); start_count = received_count; sampled_count = 0u; - total_lat_s = 0.0; + sampled_total_lat_s = 0.0; finish_time = mono_clock::zero(); } + + void prepare_summary(const ConnStats ¤t) { + *this = current; + finish_time = mono_clock::now(); + } }; - ConnStats conn_stats; struct PeriodStats { mono_time start_time = mono_clock::zero(); unsigned start_count = 0u; unsigned sampled_count = 0u; - double total_lat_s = 0.0; + double sampled_total_lat_s = 0.0; // for reporting only mono_time finish_time = mono_clock::zero(); unsigned finish_count = 0u; unsigned depth = 0u; - void reset(unsigned received_count, PeriodStats* snap = nullptr) { - if (snap) { - snap->start_time = start_time; - snap->start_count = start_count; - snap->sampled_count = sampled_count; - snap->total_lat_s = total_lat_s; - snap->finish_time = mono_clock::now(); - snap->finish_count = received_count; - } + void start_collect(unsigned received_count) { start_time = mono_clock::now(); start_count = received_count; sampled_count = 0u; - total_lat_s = 0.0; + sampled_total_lat_s = 0.0; + } + + void reset_period( + unsigned received_count, unsigned _depth, PeriodStats &snapshot) { + snapshot.start_time = start_time; + snapshot.start_count = start_count; + snapshot.sampled_count = sampled_count; + snapshot.sampled_total_lat_s = sampled_total_lat_s; + snapshot.finish_time = mono_clock::now(); + snapshot.finish_count = received_count; + snapshot.depth = _depth; + + start_collect(received_count); + } + }; + + struct JobReport { + std::string name; + unsigned depth = 0; + double connect_time_s = 0; + unsigned total_msgs = 0; + double messaging_time_s = 0; + double latency_ms = 0; + double iops = 0; + double throughput_mbps = 0; + + void account(const JobReport &stats) { + depth += stats.depth; + connect_time_s += stats.connect_time_s; + total_msgs += stats.total_msgs; + messaging_time_s += stats.messaging_time_s; + latency_ms += stats.latency_ms; + iops += stats.iops; + throughput_mbps += stats.throughput_mbps; + } + + void report() const { + auto str = fmt::format( + "{}(depth={}):\n" + " connect time: {:08f}s\n" + " messages received: {}\n" + " messaging time: {:08f}s\n" + " latency: {:08f}ms\n" + " IOPS: {:08f}\n" + " out throughput: {:08f}MB/s", + name, depth, connect_time_s, + total_msgs, messaging_time_s, + latency_ms, iops, + 
throughput_mbps); + std::cout << str << std::endl; + } + }; + + struct ConnectionPriv : public crimson::net::Connection::user_private_t { + unsigned index; + ConnectionPriv(unsigned i) : index{i} {} + }; + + struct ConnState { + crimson::net::MessengerRef msgr; + ConnStats conn_stats; + PeriodStats period_stats; + seastar::semaphore depth; + std::vector time_msgs_sent; + unsigned sent_count = 0u; + crimson::net::ConnectionRef active_conn; + bool stop_send = false; + seastar::promise stopped_send_promise; + + ConnState(std::size_t _depth) + : depth{_depth}, + time_msgs_sent{_depth, lowres_clock_t::time_point::min()} {} + + unsigned get_current_units() const { + ceph_assert(depth.available_units() >= 0); + return depth.current(); + } + + seastar::future stop_dispatch_messages() { + stop_send = true; + depth.broken(DepthBroken()); + return stopped_send_promise.get_future(); } }; - PeriodStats period_stats; const seastar::shard_id sid; - std::string lname; + const unsigned id; + const std::optional server_sid; - const unsigned jobs; - crimson::net::MessengerRef msgr; + const unsigned num_clients; + const unsigned num_conns; const unsigned msg_len; bufferlist msg_data; const unsigned nr_depth; - seastar::semaphore depth; - std::vector time_msgs_sent; + const unsigned nonce_base; crimson::auth::DummyAuthClientServer dummy_auth; - unsigned sent_count = 0u; - crimson::net::ConnectionRef active_conn = nullptr; + std::vector conn_states; - bool stop_send = false; - seastar::promise<> stopped_send_promise; - - Client(unsigned jobs, unsigned msg_len, unsigned depth) + Client(unsigned num_clients, + unsigned num_conns, + unsigned msg_len, + unsigned _depth, + unsigned nonce_base, + std::optional server_sid) : sid{seastar::this_shard_id()}, - jobs{jobs}, + id{sid + num_clients - seastar::smp::count}, + server_sid{server_sid}, + num_clients{num_clients}, + num_conns{num_conns}, msg_len{msg_len}, - nr_depth{depth/jobs}, - depth{nr_depth}, - time_msgs_sent{depth/jobs, mono_clock::zero()} { - lname = "client#"; - lname += std::to_string(sid); + nr_depth{_depth}, + nonce_base{nonce_base} { + if (is_active()) { + for (unsigned i = 0; i < num_conns; ++i) { + conn_states.emplace_back(nr_depth); + } + } msg_data.append_zero(msg_len); } - unsigned get_current_depth() const { - ceph_assert(depth.available_units() >= 0); - return nr_depth - depth.current(); + std::string get_name(unsigned i) { + return fmt::format("client{}Conn{}@{}", id, i, sid); } - void ms_handle_connect(crimson::net::ConnectionRef conn) override { - conn_stats.connected_time = mono_clock::now(); + void ms_handle_connect( + crimson::net::ConnectionRef conn, + seastar::shard_id prv_shard) override { + ceph_assert_always(prv_shard == seastar::this_shard_id()); + assert(is_active()); + unsigned index = static_cast(conn->get_user_private()).index; + auto &conn_state = conn_states[index]; + conn_state.conn_stats.finish_connecting(); } + std::optional> ms_dispatch( - crimson::net::ConnectionRef, MessageRef m) override { + crimson::net::ConnectionRef conn, MessageRef m) override { + assert(is_active()); // server replies with MOSDOp to generate server-side write workload ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); + unsigned index = static_cast(conn->get_user_private()).index; + assert(index < num_conns); + auto &conn_state = conn_states[index]; + auto msg_id = m->get_tid(); if (msg_id % SAMPLE_RATE == 0) { - auto index = msg_id % time_msgs_sent.size(); - ceph_assert(time_msgs_sent[index] != mono_clock::zero()); - std::chrono::duration cur_latency = 
mono_clock::now() - time_msgs_sent[index]; - conn_stats.total_lat_s += cur_latency.count(); - ++(conn_stats.sampled_count); - period_stats.total_lat_s += cur_latency.count(); - ++(period_stats.sampled_count); - time_msgs_sent[index] = mono_clock::zero(); + auto msg_index = msg_id % conn_state.time_msgs_sent.size(); + ceph_assert(conn_state.time_msgs_sent[msg_index] != + lowres_clock_t::time_point::min()); + std::chrono::duration cur_latency = + lowres_clock_t::now() - conn_state.time_msgs_sent[msg_index]; + conn_state.conn_stats.sampled_total_lat_s += cur_latency.count(); + ++(conn_state.conn_stats.sampled_count); + conn_state.period_stats.sampled_total_lat_s += cur_latency.count(); + ++(conn_state.period_stats.sampled_count); + conn_state.time_msgs_sent[msg_index] = lowres_clock_t::time_point::min(); } - ++(conn_stats.received_count); - depth.signal(1); + ++(conn_state.conn_stats.received_count); + conn_state.depth.signal(1); return {seastar::now()}; } @@ -330,49 +649,115 @@ static seastar::future<> run( // should start messenger at this shard? bool is_active() { ceph_assert(seastar::this_shard_id() == sid); - return sid != 0 && sid <= jobs; + return sid + num_clients >= seastar::smp::count; } seastar::future<> init() { - return container().invoke_on_all([] (auto& client) { + return container().invoke_on_all([](auto& client) { if (client.is_active()) { - client.msgr = crimson::net::Messenger::create(entity_name_t::OSD(client.sid), client.lname, client.sid); - client.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0)); - client.msgr->set_auth_client(&client.dummy_auth); - client.msgr->set_auth_server(&client.dummy_auth); - return client.msgr->start({&client}); + return seastar::do_for_each( + boost::make_counting_iterator(0u), + boost::make_counting_iterator(client.num_conns), + [&client](auto i) { + auto &conn_state = client.conn_states[i]; + std::string name = client.get_name(i); + conn_state.msgr = crimson::net::Messenger::create( + entity_name_t::OSD(client.id * client.num_conns + i), + name, client.nonce_base + client.id * client.num_conns + i, true); + conn_state.msgr->set_default_policy(crimson::net::SocketPolicy::lossy_client(0)); + conn_state.msgr->set_auth_client(&client.dummy_auth); + conn_state.msgr->set_auth_server(&client.dummy_auth); + return conn_state.msgr->start({&client}); + }); } return seastar::now(); }); } seastar::future<> shutdown() { - return container().invoke_on_all([] (auto& client) { - if (client.is_active()) { - logger().info("{} shutdown...", client.lname); - ceph_assert(client.msgr); - client.msgr->stop(); - return client.msgr->shutdown().then([&client] { - return client.stop_dispatch_messages(); + return seastar::do_with( + std::vector(num_clients * num_conns), + [this](auto &all_stats) { + return container().invoke_on_all([&all_stats](auto& client) { + if (!client.is_active()) { + return seastar::now(); + } + + return seastar::parallel_for_each( + boost::make_counting_iterator(0u), + boost::make_counting_iterator(client.num_conns), + [&all_stats, &client](auto i) { + logger().info("{} shutdown...", client.get_name(i)); + auto &conn_state = client.conn_states[i]; + return conn_state.stop_dispatch_messages( + ).then([&all_stats, &client, i](auto stats) { + all_stats[client.id * client.num_conns + i] = stats; + }); + }).then([&client] { + return seastar::do_for_each( + boost::make_counting_iterator(0u), + boost::make_counting_iterator(client.num_conns), + [&client](auto i) { + auto &conn_state = client.conn_states[i]; + 
ceph_assert(conn_state.msgr); + conn_state.msgr->stop(); + return conn_state.msgr->shutdown(); + }); }); - } - return seastar::now(); + }).then([&all_stats, this] { + auto nr_jobs = all_stats.size(); + JobReport summary; + std::vector clients(num_clients); + + for (unsigned i = 0; i < nr_jobs; ++i) { + auto &stats = all_stats[i]; + stats.report(); + clients[i / num_conns].account(stats); + summary.account(stats); + } + + std::cout << std::endl; + std::cout << "per client:" << std::endl; + for (unsigned i = 0; i < num_clients; ++i) { + auto &stats = clients[i]; + stats.name = fmt::format("client{}", i); + stats.connect_time_s /= num_conns; + stats.messaging_time_s /= num_conns; + stats.latency_ms /= num_conns; + stats.report(); + } + + std::cout << std::endl; + summary.name = fmt::format("all", nr_jobs); + summary.connect_time_s /= nr_jobs; + summary.messaging_time_s /= nr_jobs; + summary.latency_ms /= nr_jobs; + summary.report(); + }); }); } seastar::future<> connect_wait_verify(const entity_addr_t& peer_addr) { - return container().invoke_on_all([peer_addr] (auto& client) { - // start clients in active cores (#1 ~ #jobs) + return container().invoke_on_all([peer_addr](auto& client) { + // start clients in active cores if (client.is_active()) { - mono_time start_time = mono_clock::now(); - client.active_conn = client.msgr->connect(peer_addr, entity_name_t::TYPE_OSD); + for (unsigned i = 0; i < client.num_conns; ++i) { + auto &conn_state = client.conn_states[i]; + conn_state.conn_stats.start_connecting(); + conn_state.active_conn = conn_state.msgr->connect(peer_addr, entity_name_t::TYPE_OSD); + conn_state.active_conn->set_user_private( + std::make_unique(i)); + } // make sure handshake won't hurt the performance - return seastar::sleep(1s).then([&client, start_time] { - if (client.conn_stats.connected_time == mono_clock::zero()) { - logger().error("\n{} not connected after 1s!\n", client.lname); - ceph_assert(false); + return seastar::sleep(1s).then([&client] { + for (unsigned i = 0; i < client.num_conns; ++i) { + auto &conn_state = client.conn_states[i]; + if (conn_state.conn_stats.connected_time == mono_clock::zero()) { + logger().error("\n{} not connected after 1s!\n", + client.get_name(i)); + ceph_assert(false); + } } - client.conn_stats.connecting_time = start_time; }); } return seastar::now(); @@ -382,34 +767,43 @@ static seastar::future<> run( private: class TimerReport { private: - const unsigned jobs; + const unsigned num_clients; + const unsigned num_conns; const unsigned msgtime; const unsigned bytes_of_block; unsigned elapsed = 0u; - std::vector start_times; std::vector snaps; std::vector summaries; + std::vector client_reactor_utilizations; + std::optional server_reactor_utilization; public: - TimerReport(unsigned jobs, unsigned msgtime, unsigned bs) - : jobs{jobs}, + TimerReport(unsigned num_clients, unsigned num_conns, unsigned msgtime, unsigned bs) + : num_clients{num_clients}, + num_conns{num_conns}, msgtime{msgtime}, bytes_of_block{bs}, - start_times{jobs, mono_clock::zero()}, - snaps{jobs}, - summaries{jobs} {} + snaps{num_clients * num_conns}, + summaries{num_clients * num_conns}, + client_reactor_utilizations(num_clients) {} unsigned get_elapsed() const { return elapsed; } - PeriodStats& get_snap_by_job(seastar::shard_id sid) { - ceph_assert(sid >= 1 && sid <= jobs); - return snaps[sid - 1]; + PeriodStats& get_snap(unsigned client_id, unsigned i) { + return snaps[client_id * num_conns + i]; } - ConnStats& get_summary_by_job(seastar::shard_id sid) { - ceph_assert(sid >= 1 
&& sid <= jobs); - return summaries[sid - 1]; + ConnStats& get_summary(unsigned client_id, unsigned i) { + return summaries[client_id * num_conns + i]; + } + + void set_client_reactor_utilization(unsigned client_id, double ru) { + client_reactor_utilizations[client_id] = ru; + } + + void set_server_reactor_utilization(double ru) { + server_reactor_utilization = ru; } bool should_stop() const { @@ -422,45 +816,50 @@ static seastar::future<> run( }); } - void report_header() { + void report_header() const { std::ostringstream sout; sout << std::setfill(' ') - << std::setw(7) << "sec" - << std::setw(6) << "depth" - << std::setw(8) << "IOPS" - << std::setw(8) << "MB/s" - << std::setw(8) << "lat(ms)"; + << std::setw(6) << "sec" + << std::setw(7) << "depth" + << std::setw(10) << "IOPS" + << std::setw(9) << "MB/s" + << std::setw(9) << "lat(ms)"; std::cout << sout.str() << std::endl; } void report_period() { - if (elapsed == 1) { - // init this->start_times at the first period - for (unsigned i=0; i elapsed_d = 0s; unsigned depth = 0u; unsigned ops = 0u; unsigned sampled_count = 0u; - double total_lat_s = 0.0; + double sampled_total_lat_s = 0.0; for (const auto& snap: snaps) { elapsed_d += (snap.finish_time - snap.start_time); depth += snap.depth; ops += (snap.finish_count - snap.start_count); sampled_count += snap.sampled_count; - total_lat_s += snap.total_lat_s; + sampled_total_lat_s += snap.sampled_total_lat_s; } - double elapsed_s = elapsed_d.count() / jobs; + double elapsed_s = elapsed_d.count() / (num_clients * num_conns); double iops = ops/elapsed_s; std::ostringstream sout; sout << setfill(' ') - << std::setw(7) << elapsed_s + << std::setw(5) << elapsed_s + << " " << std::setw(6) << depth - << std::setw(8) << iops + << " " + << std::setw(9) << iops + << " " << std::setw(8) << iops * bytes_of_block / 1048576 - << std::setw(8) << (total_lat_s / sampled_count * 1000); + << " " + << std::setw(8) << (sampled_total_lat_s / sampled_count * 1000) + << " -- "; + if (server_reactor_utilization.has_value()) { + sout << *server_reactor_utilization << " -- "; + } + for (double cru : client_reactor_utilizations) { + sout << cru << ","; + } std::cout << sout.str() << std::endl; } @@ -468,14 +867,14 @@ static seastar::future<> run( std::chrono::duration elapsed_d = 0s; unsigned ops = 0u; unsigned sampled_count = 0u; - double total_lat_s = 0.0; + double sampled_total_lat_s = 0.0; for (const auto& summary: summaries) { elapsed_d += (summary.finish_time - summary.start_time); ops += (summary.received_count - summary.start_count); sampled_count += summary.sampled_count; - total_lat_s += summary.total_lat_s; + sampled_total_lat_s += summary.sampled_total_lat_s; } - double elapsed_s = elapsed_d.count() / jobs; + double elapsed_s = elapsed_d.count() / (num_clients * num_conns); double iops = ops / elapsed_s; std::ostringstream sout; sout << "--------------" @@ -486,7 +885,7 @@ static seastar::future<> run( << std::setw(6) << "-" << std::setw(8) << iops << std::setw(8) << iops * bytes_of_block / 1048576 - << std::setw(8) << (total_lat_s / sampled_count * 1000) + << std::setw(8) << (sampled_total_lat_s / sampled_count * 1000) << "\n"; std::cout << sout.str() << std::endl; } @@ -495,10 +894,20 @@ static seastar::future<> run( seastar::future<> report_period(TimerReport& report) { return container().invoke_on_all([&report] (auto& client) { if (client.is_active()) { - PeriodStats& snap = report.get_snap_by_job(client.sid); - client.period_stats.reset(client.conn_stats.received_count, - &snap); - snap.depth = 
client.get_current_depth(); + for (unsigned i = 0; i < client.num_conns; ++i) { + auto &conn_state = client.conn_states[i]; + PeriodStats& snap = report.get_snap(client.id, i); + conn_state.period_stats.reset_period( + conn_state.conn_stats.received_count, + client.nr_depth - conn_state.get_current_units(), + snap); + } + report.set_client_reactor_utilization(client.id, get_reactor_utilization()); + } + if (client.server_sid.has_value() && + seastar::this_shard_id() == *client.server_sid) { + assert(!client.is_active()); + report.set_server_reactor_utilization(get_reactor_utilization()); } }).then([&report] { report.report_period(); @@ -508,9 +917,11 @@ static seastar::future<> run( seastar::future<> report_summary(TimerReport& report) { return container().invoke_on_all([&report] (auto& client) { if (client.is_active()) { - ConnStats& summary = report.get_summary_by_job(client.sid); - summary = client.conn_stats; - summary.finish_time = mono_clock::now(); + for (unsigned i = 0; i < client.num_conns; ++i) { + auto &conn_state = client.conn_states[i]; + ConnStats& summary = report.get_summary(client.id, i); + summary.prepare_summary(conn_state.conn_stats); + } } }).then([&report] { report.report_summary(); @@ -519,10 +930,13 @@ static seastar::future<> run( public: seastar::future<> dispatch_with_timer(unsigned ramptime, unsigned msgtime) { - logger().info("[all clients]: start sending MOSDOps from {} clients", jobs); + logger().info("[all clients]: start sending MOSDOps from {} clients * {} conns", + num_clients, num_conns); return container().invoke_on_all([] (auto& client) { if (client.is_active()) { - client.do_dispatch_messages(client.active_conn.get()); + for (unsigned i = 0; i < client.num_conns; ++i) { + client.do_dispatch_messages(i); + } } }).then([ramptime] { logger().info("[all clients]: ramping up {} seconds...", ramptime); @@ -530,14 +944,18 @@ static seastar::future<> run( }).then([this] { return container().invoke_on_all([] (auto& client) { if (client.is_active()) { - client.conn_stats.start(); - client.period_stats.reset(client.conn_stats.received_count); + for (unsigned i = 0; i < client.num_conns; ++i) { + auto &conn_state = client.conn_states[i]; + conn_state.conn_stats.start_collect(); + conn_state.period_stats.start_collect(conn_state.conn_stats.received_count); + } } }); }).then([this, msgtime] { logger().info("[all clients]: reporting {} seconds...\n", msgtime); return seastar::do_with( - TimerReport(jobs, msgtime, msg_len), [this] (auto& report) { + TimerReport(num_clients, num_conns, msgtime, msg_len), + [this](auto& report) { report.report_header(); return seastar::do_until( [&report] { return report.should_stop(); }, @@ -567,9 +985,11 @@ static seastar::future<> run( } private: - seastar::future<> send_msg(crimson::net::Connection* conn) { + seastar::future<> send_msg(ConnState &conn_state) { ceph_assert(seastar::this_shard_id() == sid); - return depth.wait(1).then([this, conn] { + conn_state.sent_count += 1; + return conn_state.depth.wait(1 + ).then([this, &conn_state] { const static pg_t pgid; const static object_locator_t oloc; const static hobject_t hobj(object_t(), oloc.key, CEPH_NOSNAP, pgid.ps(), @@ -579,89 +999,132 @@ static seastar::future<> run( bufferlist data(msg_data); m->write(0, msg_len, data); // use tid as the identity of each round - m->set_tid(sent_count); + m->set_tid(conn_state.sent_count); // sample message latency - if (sent_count % SAMPLE_RATE == 0) { - auto index = sent_count % time_msgs_sent.size(); - ceph_assert(time_msgs_sent[index] == 
mono_clock::zero()); - time_msgs_sent[index] = mono_clock::now(); + if (unlikely(conn_state.sent_count % SAMPLE_RATE == 0)) { + auto index = conn_state.sent_count % conn_state.time_msgs_sent.size(); + ceph_assert(conn_state.time_msgs_sent[index] == + lowres_clock_t::time_point::min()); + conn_state.time_msgs_sent[index] = lowres_clock_t::now(); } - return conn->send(std::move(m)); + return conn_state.active_conn->send(std::move(m)); }); } class DepthBroken: public std::exception {}; - seastar::future<> stop_dispatch_messages() { - stop_send = true; - depth.broken(DepthBroken()); - return stopped_send_promise.get_future(); + seastar::future stop_dispatch_messages(unsigned i) { + auto &conn_state = conn_states[i]; + conn_state.stop_send = true; + conn_state.depth.broken(DepthBroken()); + return conn_state.stopped_send_promise.get_future(); } - void do_dispatch_messages(crimson::net::Connection* conn) { + void do_dispatch_messages(unsigned i) { ceph_assert(seastar::this_shard_id() == sid); - ceph_assert(sent_count == 0); - conn_stats.start_time = mono_clock::now(); + auto &conn_state = conn_states[i]; + ceph_assert(conn_state.sent_count == 0); + conn_state.conn_stats.start_time = mono_clock::now(); // forwarded to stopped_send_promise (void) seastar::do_until( - [this] { return stop_send; }, - [this, conn] { - sent_count += 1; - return send_msg(conn); - } + [&conn_state] { return conn_state.stop_send; }, + [this, &conn_state] { return send_msg(conn_state); } ).handle_exception_type([] (const DepthBroken& e) { // ok, stopped by stop_dispatch_messages() - }).then([this, conn] { - std::chrono::duration dur_conn = conn_stats.connected_time - conn_stats.connecting_time; - std::chrono::duration dur_msg = mono_clock::now() - conn_stats.start_time; - unsigned ops = conn_stats.received_count - conn_stats.start_count; - logger().info("{}: stopped sending OSDOPs.\n" - "{}(depth={}):\n" - " connect time: {}s\n" - " messages received: {}\n" - " messaging time: {}s\n" - " latency: {}ms\n" - " IOPS: {}\n" - " throughput: {}MB/s\n", - *conn, - lname, - nr_depth, - dur_conn.count(), - ops, - dur_msg.count(), - conn_stats.total_lat_s / conn_stats.sampled_count * 1000, - ops / dur_msg.count(), - ops / dur_msg.count() * msg_len / 1048576); - stopped_send_promise.set_value(); + }).then([this, &conn_state, i] { + std::string name = get_name(i); + logger().info("{} {}: stopped sending OSDOPs", + name, *conn_state.active_conn); + + std::chrono::duration dur_conn = + conn_state.conn_stats.connected_time - + conn_state.conn_stats.connecting_time; + std::chrono::duration dur_msg = + mono_clock::now() - conn_state.conn_stats.start_time; + unsigned ops = + conn_state.conn_stats.received_count - + conn_state.conn_stats.start_count; + + JobReport stats; + stats.name = name; + stats.depth = nr_depth; + stats.connect_time_s = dur_conn.count(); + stats.total_msgs = ops; + stats.messaging_time_s = dur_msg.count(); + stats.latency_ms = + conn_state.conn_stats.sampled_total_lat_s / + conn_state.conn_stats.sampled_count * 1000; + stats.iops = ops / dur_msg.count(); + stats.throughput_mbps = ops / dur_msg.count() * msg_len / 1048576; + + conn_state.stopped_send_promise.set_value(stats); }); } }; }; + std::optional server_sid; + bool server_needs_report = false; + if (mode == perf_mode_t::both) { + ceph_assert(server_conf.is_fixed_cpu == true); + server_sid = server_conf.core; + } else if (mode == perf_mode_t::server) { + server_needs_report = true; + } return seastar::when_all( - test_state::Server::create(server_conf.core, 
server_conf.block_size), - create_sharded(client_conf.jobs, client_conf.block_size, client_conf.depth), - crimson::common::sharded_conf().start(EntityName{}, std::string_view{"ceph"}).then([] { - return crimson::common::local_conf().start(); - }).then([crc_enabled] { - return crimson::common::local_conf().set_val( - "ms_crc_data", crc_enabled ? "true" : "false"); - }) + seastar::futurize_invoke([mode, server_conf, server_needs_report] { + if (mode == perf_mode_t::client) { + return seastar::make_ready_future(nullptr); + } else { + return create_sharded( + server_conf.core, + server_conf.block_size, + server_needs_report); + } + }), + seastar::futurize_invoke([mode, client_conf, server_sid] { + if (mode == perf_mode_t::server) { + return seastar::make_ready_future(nullptr); + } else { + unsigned nonce_base = ceph::util::generate_random_number(); + logger().info("client nonce_base={}", nonce_base); + return create_sharded( + client_conf.num_clients, + client_conf.num_conns, + client_conf.block_size, + client_conf.depth, + nonce_base, + server_sid); + } + }), + crimson::common::sharded_conf().start( + EntityName{}, std::string_view{"ceph"} + ).then([] { + return crimson::common::local_conf().start(); + }).then([crc_enabled] { + return crimson::common::local_conf().set_val( + "ms_crc_data", crc_enabled ? "true" : "false"); + }) ).then([=](auto&& ret) { - auto fp_server = std::move(std::get<0>(ret).get0()); + auto server = std::move(std::get<0>(ret).get0()); auto client = std::move(std::get<1>(ret).get0()); - test_state::Server* server = fp_server.get(); + // reserve core 0 for potentially better performance if (mode == perf_mode_t::both) { - logger().info("\nperf settings:\n {}\n {}\n", - client_conf.str(), server_conf.str()); - ceph_assert(seastar::smp::count >= 1+client_conf.jobs); - ceph_assert(client_conf.jobs > 0); - ceph_assert(seastar::smp::count >= 1+server_conf.core); - ceph_assert(server_conf.core == 0 || server_conf.core > client_conf.jobs); + logger().info("\nperf settings:\n smp={}\n {}\n {}\n", + seastar::smp::count, client_conf.str(), server_conf.str()); + if (client_conf.skip_core_0) { + ceph_assert(seastar::smp::count > client_conf.num_clients); + } else { + ceph_assert(seastar::smp::count >= client_conf.num_clients); + } + ceph_assert(client_conf.num_clients > 0); + ceph_assert(seastar::smp::count > server_conf.core + client_conf.num_clients); return seastar::when_all_succeed( - server->init(server_conf.addr), + // it is not reasonable to allow server/client to shared cores for + // performance benchmarking purposes. 
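+      // Illustrative core layout (numbers are hypothetical, not the defaults):
+      //   smp=8, server-core=1, clients=3, conns-per-client=2
+      //     server messenger : fixed to shard 1
+      //     client messengers: shards 5, 6 and 7 (is_active() selects the
+      //                        last `clients` shards, so core 0 stays free
+      //                        when --client-skip-core-0 is true)
+      //     each active shard drives 2 connections, each with its own
+      //     messenger and nonce.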
+ server->init(server_conf.addr, server_conf.is_fixed_cpu), client->init() ).then_unpack([client, addr = client_conf.server_addr] { return client->connect_wait_verify(addr); @@ -670,13 +1133,18 @@ static seastar::future<> run( return client->dispatch_with_timer(ramptime, msgtime); }).then([client] { return client->shutdown(); - }).then([server, fp_server = std::move(fp_server)] () mutable { - return server->shutdown().then([cleanup = std::move(fp_server)] {}); + }).then([server] { + return server->shutdown(); }); } else if (mode == perf_mode_t::client) { - logger().info("\nperf settings:\n {}\n", client_conf.str()); - ceph_assert(seastar::smp::count >= 1+client_conf.jobs); - ceph_assert(client_conf.jobs > 0); + logger().info("\nperf settings:\n smp={}\n {}\n", + seastar::smp::count, client_conf.str()); + if (client_conf.skip_core_0) { + ceph_assert(seastar::smp::count > client_conf.num_clients); + } else { + ceph_assert(seastar::smp::count >= client_conf.num_clients); + } + ceph_assert(client_conf.num_clients > 0); return client->init( ).then([client, addr = client_conf.server_addr] { return client->connect_wait_verify(addr); @@ -687,15 +1155,15 @@ static seastar::future<> run( return client->shutdown(); }); } else { // mode == perf_mode_t::server - ceph_assert(seastar::smp::count >= 1+server_conf.core); - logger().info("\nperf settings:\n {}\n", server_conf.str()); - return server->init(server_conf.addr - // dispatch ops - ).then([server] { - return server->wait(); - // shutdown - }).then([server, fp_server = std::move(fp_server)] () mutable { - return server->shutdown().then([cleanup = std::move(fp_server)] {}); + ceph_assert(seastar::smp::count > server_conf.core); + logger().info("\nperf settings:\n smp={}\n {}\n", + seastar::smp::count, server_conf.str()); + return seastar::async([server, server_conf] { + // FIXME: SIGINT is not received by stop_signal + seastar_apps_lib::stop_signal should_stop; + server->init(server_conf.addr, server_conf.is_fixed_cpu).get(); + should_stop.wait().get(); + server->shutdown().get(); }); } }).finally([] { @@ -711,21 +1179,27 @@ int main(int argc, char** argv) app.add_options() ("mode", bpo::value()->default_value(0), "0: both, 1:client, 2:server") - ("addr", bpo::value()->default_value("v2:127.0.0.1:9010"), + ("server-addr", bpo::value()->default_value("v2:127.0.0.1:9010"), "server address(only support msgr v2 protocol)") ("ramptime", bpo::value()->default_value(5), "seconds of client ramp-up time") ("msgtime", bpo::value()->default_value(15), "seconds of client messaging time") - ("jobs", bpo::value()->default_value(1), - "number of client jobs (messengers)") - ("cbs", bpo::value()->default_value(4096), + ("clients", bpo::value()->default_value(1), + "number of client messengers") + ("conns-per-client", bpo::value()->default_value(1), + "number of connections per client") + ("client-bs", bpo::value()->default_value(4096), "client block size") ("depth", bpo::value()->default_value(512), - "client io depth") - ("core", bpo::value()->default_value(0), - "server running core") - ("sbs", bpo::value()->default_value(0), + "client io depth per job") + ("client-skip-core-0", bpo::value()->default_value(true), + "client skip core 0") + ("server-fixed-cpu", bpo::value()->default_value(true), + "server is in the fixed cpu mode, non-fixed doesn't support the mode both") + ("server-core", bpo::value()->default_value(1), + "server messenger running core") + ("server-bs", bpo::value()->default_value(0), "server block size") ("crc-enabled", 
bpo::value()->default_value(false), "enable CRC checks"); diff --git a/ceph/src/exporter/DaemonMetricCollector.cc b/ceph/src/exporter/DaemonMetricCollector.cc index f4f7240fc..ebe85c304 100644 --- a/ceph/src/exporter/DaemonMetricCollector.cc +++ b/ceph/src/exporter/DaemonMetricCollector.cc @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -146,18 +147,19 @@ void DaemonMetricCollector::dump_asok_metrics() { std::string counter_name = perf_group + "_" + counter_name_init; promethize(counter_name); - if (counters_labels.empty()) { - auto labels_and_name = get_labels_and_metric_name(daemon_name, counter_name); - labels = labels_and_name.first; - counter_name = labels_and_name.second; + auto extra_labels = get_extra_labels(daemon_name); + if (extra_labels.empty()) { + dout(1) << "Unable to parse instance_id from daemon_name: " << daemon_name << dendl; + continue; } + labels.insert(extra_labels.begin(), extra_labels.end()); + // For now this is only required for rgw multi-site metrics auto multisite_labels_and_name = add_fixed_name_metrics(counter_name); if (!multisite_labels_and_name.first.empty()) { labels.insert(multisite_labels_and_name.first.begin(), multisite_labels_and_name.first.end()); counter_name = multisite_labels_and_name.second; } - labels.insert({"ceph_daemon", quote(daemon_name)}); auto perf_values = counters_values.at(counter_name_init); dump_asok_metric(counter_group, perf_values, counter_name, labels); } @@ -285,12 +287,16 @@ std::string DaemonMetricCollector::asok_request(AdminSocketClient &asok, return response; } -std::pair -DaemonMetricCollector::get_labels_and_metric_name(std::string daemon_name, - std::string metric_name) { - std::string new_metric_name; +labels_t DaemonMetricCollector::get_extra_labels(std::string daemon_name) { labels_t labels; - new_metric_name = metric_name; + const std::string ceph_daemon_prefix = "ceph-"; + const std::string ceph_client_prefix = "client."; + if (daemon_name.rfind(ceph_daemon_prefix, 0) == 0) { + daemon_name = daemon_name.substr(ceph_daemon_prefix.size()); + } + if (daemon_name.rfind(ceph_client_prefix, 0) == 0) { + daemon_name = daemon_name.substr(ceph_client_prefix.size()); + } // In vstart cluster socket files for rgw are stored as radosgw..asok if (daemon_name.find("radosgw") != std::string::npos) { std::size_t pos = daemon_name.find_last_of('.'); @@ -298,23 +304,23 @@ DaemonMetricCollector::get_labels_and_metric_name(std::string daemon_name, labels["instance_id"] = quote(tmp); } else if (daemon_name.find("rgw") != std::string::npos) { - std::string tmp = daemon_name.substr(16, std::string::npos); - std::string::size_type pos = tmp.find('.'); - labels["instance_id"] = quote("rgw." + tmp.substr(0, pos)); - } - else if (daemon_name.find("rbd-mirror") != std::string::npos) { - std::regex re( - "^rbd_mirror_image_([^/]+)/(?:(?:([^/]+)/" - ")?)(.*)\\.(replay(?:_bytes|_latency)?)$"); - std::smatch match; - if (std::regex_search(daemon_name, match, re) == true) { - new_metric_name = "ceph_rbd_mirror_image_" + match.str(4); - labels["pool"] = quote(match.str(1)); - labels["namespace"] = quote(match.str(2)); - labels["image"] = quote(match.str(3)); + // fetch intance_id for e.g. 
"hrgsea" from daemon_name=rgw.foo.ceph-node-00.hrgsea.2.94739968030880 + std::vector elems; + std::stringstream ss; + ss.str(daemon_name); + std::string item; + while (std::getline(ss, item, '.')) { + elems.push_back(item); } + if (elems.size() >= 4) { + labels["instance_id"] = quote(elems[3]); + } else { + return labels_t(); + } + } else { + labels.insert({"ceph_daemon", quote(daemon_name)}); } - return {labels, new_metric_name}; + return labels; } // Add fixed name metrics from existing ones that have details in their names diff --git a/ceph/src/exporter/DaemonMetricCollector.h b/ceph/src/exporter/DaemonMetricCollector.h index 3d35a9c79..e906fb13a 100644 --- a/ceph/src/exporter/DaemonMetricCollector.h +++ b/ceph/src/exporter/DaemonMetricCollector.h @@ -34,6 +34,7 @@ class DaemonMetricCollector { public: void main(); std::string get_metrics(); + labels_t get_extra_labels(std::string daemon_name); private: std::map clients; @@ -47,8 +48,6 @@ private: void dump_asok_metric(boost::json::object perf_info, boost::json::value perf_values, std::string name, labels_t labels); - std::pair - get_labels_and_metric_name(std::string daemon_name, std::string metric_name); std::pair add_fixed_name_metrics(std::string metric_name); void get_process_metrics(std::vector> daemon_pids); std::string asok_request(AdminSocketClient &asok, std::string command, std::string daemon_name); diff --git a/ceph/src/include/ceph_fs.h b/ceph/src/include/ceph_fs.h index 1a75a5193..28440c820 100644 --- a/ceph/src/include/ceph_fs.h +++ b/ceph/src/include/ceph_fs.h @@ -418,6 +418,7 @@ enum { CEPH_MDS_OP_RMSNAP = 0x01401, CEPH_MDS_OP_LSSNAP = 0x00402, CEPH_MDS_OP_RENAMESNAP = 0x01403, + CEPH_MDS_OP_READDIR_SNAPDIFF = 0x01404, // internal op CEPH_MDS_OP_FRAGMENTDIR= 0x01500, @@ -429,6 +430,11 @@ enum { CEPH_MDS_OP_RDLOCK_FRAGSSTATS = 0x01507 }; +#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \ + op == CEPH_MDS_OP_MKNOD || \ + op == CEPH_MDS_OP_MKDIR || \ + op == CEPH_MDS_OP_SYMLINK) + extern const char *ceph_mds_op_name(int op); // setattr mask is an int @@ -473,12 +479,12 @@ int ceph_flags_sys2wire(int flags); #define CEPH_XATTR_REMOVE (1 << 31) /* - * readdir request flags; + * readdir/readdir_snapdiff request flags; */ #define CEPH_READDIR_REPLY_BITFLAGS (1<<0) /* - * readdir reply flags. + * readdir/readdir_snapdiff reply flags. 
*/ #define CEPH_READDIR_FRAG_END (1<<0) #define CEPH_READDIR_FRAG_COMPLETE (1<<8) @@ -622,9 +628,17 @@ union ceph_mds_request_args { __le64 parent; __le32 hash; } __attribute__ ((packed)) lookupino; + struct { + __le32 frag; /* which dir fragment */ + __le32 max_entries; /* how many dentries to grab */ + __le32 max_bytes; + __le16 flags; + __le32 offset_hash; + __le64 snap_other; + } __attribute__ ((packed)) snapdiff; } __attribute__ ((packed)); -#define CEPH_MDS_REQUEST_HEAD_VERSION 2 +#define CEPH_MDS_REQUEST_HEAD_VERSION 3 /* * Note that any change to this structure must ensure that it is compatible @@ -645,9 +659,12 @@ struct ceph_mds_request_head { __le32 ext_num_retry; /* new count retry attempts */ __le32 ext_num_fwd; /* new count fwd attempts */ + + __le32 struct_len; /* to store size of struct ceph_mds_request_head */ + __le32 owner_uid, owner_gid; /* used for OPs which create inodes */ } __attribute__ ((packed)); -void inline encode(const struct ceph_mds_request_head& h, ceph::buffer::list& bl, bool old_version) { +void inline encode(const struct ceph_mds_request_head& h, ceph::buffer::list& bl) { using ceph::encode; encode(h.version, bl); encode(h.oldest_client_tid, bl); @@ -667,14 +684,30 @@ void inline encode(const struct ceph_mds_request_head& h, ceph::buffer::list& bl encode(h.ino, bl); bl.append((char*)&h.args, sizeof(h.args)); - if (!old_version) { + if (h.version >= 2) { encode(h.ext_num_retry, bl); encode(h.ext_num_fwd, bl); } + + if (h.version >= 3) { + __u32 struct_len = sizeof(struct ceph_mds_request_head); + encode(struct_len, bl); + encode(h.owner_uid, bl); + encode(h.owner_gid, bl); + + /* + * Please, add new fields handling here. + * You don't need to check h.version as we do it + * in decode(), because decode can properly skip + * all unsupported fields if h.version >= 3. + */ + } } void inline decode(struct ceph_mds_request_head& h, ceph::buffer::list::const_iterator& bl) { using ceph::decode; + unsigned struct_end = bl.get_off(); + decode(h.version, bl); decode(h.oldest_client_tid, bl); decode(h.mdsmap_epoch, bl); @@ -695,6 +728,42 @@ void inline decode(struct ceph_mds_request_head& h, ceph::buffer::list::const_it h.ext_num_retry = h.num_retry; h.ext_num_fwd = h.num_fwd; } + + if (h.version >= 3) { + decode(h.struct_len, bl); + struct_end += h.struct_len; + + decode(h.owner_uid, bl); + decode(h.owner_gid, bl); + } else { + /* + * client is old: let's take caller_{u,g}id as owner_{u,g}id + * this is how it worked before adding of owner_{u,g}id fields. + */ + h.owner_uid = h.caller_uid; + h.owner_gid = h.caller_gid; + } + + /* add new fields handling here */ + + /* + * From version 3 we have struct_len field. + * It allows us to properly handle a case + * when client send struct ceph_mds_request_head + * bigger in size than MDS supports. In this + * case we just want to skip all remaining bytes + * at the end. + * + * See also DECODE_FINISH macro. Unfortunately, + * we can't start using it right now as it will be + * an incompatible protocol change. 
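+ *
+ * A concrete illustration (hypothetical future field): if a newer client
+ * appends an extra __le64 after owner_gid and encodes the larger
+ * struct_len, this decoder still stops after owner_gid; the final
+ * "bl += struct_end - bl.get_off()" advances past the 8 unknown bytes
+ * instead of misinterpreting them as the start of the next structure.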
+ */ + if (h.version >= 3) { + if (bl.get_off() > struct_end) + throw ::ceph::buffer::malformed_input(DECODE_ERR_PAST(__PRETTY_FUNCTION__)); + if (bl.get_off() < struct_end) + bl += struct_end - bl.get_off(); + } } /* cap/lease release record */ diff --git a/ceph/src/include/cephfs/libcephfs.h b/ceph/src/include/cephfs/libcephfs.h index 62e0b51c2..dc62698fa 100644 --- a/ceph/src/include/cephfs/libcephfs.h +++ b/ceph/src/include/cephfs/libcephfs.h @@ -27,6 +27,7 @@ #include #include #include +#include #include "ceph_ll_client.h" @@ -112,6 +113,11 @@ struct snap_info { struct snap_metadata *snap_metadata; }; +struct ceph_snapdiff_entry_t { + struct dirent dir_entry; + uint64_t snapid; //should be snapid_t but prefer not to exposure it +}; + /* setattr mask bits (up to an int in size) */ #ifndef CEPH_SETATTR_MODE #define CEPH_SETATTR_MODE (1 << 0) @@ -609,6 +615,53 @@ int ceph_readdir_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de, struct ceph_statx *stx, unsigned want, unsigned flags, struct Inode **out); +struct ceph_snapdiff_info +{ + struct ceph_mount_info* cmount; + struct ceph_dir_result* dir1; // primary dir entry to build snapdiff for. + struct ceph_dir_result* dir_aux; // aux dir entry to identify the second snapshot. + // Can point to the parent dir entry if entry-in-question + // doesn't exist in the second snapshot +}; + +/** + * Opens snapdiff stream to get snapshots delta (aka snapdiff). + * + * @param cmount the ceph mount handle to use for snapdiff retrieval. + * @param root_path root path for snapshots-in-question + * @param rel_path subpath under the root to build delta for + * @param snap1 the first snapshot name + * @param snap2 the second snapshot name + * @param out resulting snapdiff stream handle to be used for snapdiff results + retrieval via ceph_readdir_snapdiff + * @returns 0 on success and negative error code otherwise + */ +int ceph_open_snapdiff(struct ceph_mount_info* cmount, + const char* root_path, + const char* rel_path, + const char* snap1, + const char* snap2, + struct ceph_snapdiff_info* out); +/** + * Get the next snapshot delta entry. + * + * @param info snapdiff stream handle opened via ceph_open_snapdiff() + * @param out the next snapdiff entry which includes directory entry and the + * entry's snapshot id - later one for emerged/existing entry or + * former snapshot id for the removed entry. + * @returns >0 on success, 0 if no more entries in the stream and negative + * error code otherwise + */ +int ceph_readdir_snapdiff(struct ceph_snapdiff_info* snapdiff, + struct ceph_snapdiff_entry_t* out); +/** + * Close snapdiff stream. + * + * @param info snapdiff stream handle opened via ceph_open_snapdiff() + * @returns 0 on success and negative error code otherwise + */ +int ceph_close_snapdiff(struct ceph_snapdiff_info* snapdiff); + /** * Gets multiple directory entries. 
* diff --git a/ceph/src/include/compat.h b/ceph/src/include/compat.h index c65a6ae44..1100d69eb 100644 --- a/ceph/src/include/compat.h +++ b/ceph/src/include/compat.h @@ -259,9 +259,6 @@ typedef unsigned int uint; typedef _sigset_t sigset_t; -typedef unsigned int uid_t; -typedef unsigned int gid_t; - typedef unsigned int blksize_t; typedef unsigned __int64 blkcnt_t; typedef unsigned short nlink_t; diff --git a/ceph/src/include/rados.h b/ceph/src/include/rados.h index 55cfdcb78..eac3a2159 100644 --- a/ceph/src/include/rados.h +++ b/ceph/src/include/rados.h @@ -172,6 +172,7 @@ extern const char *ceph_osd_state_name(int s); #define CEPH_OSDMAP_PURGED_SNAPDIRS (1<<20) /* osds have converted snapsets */ #define CEPH_OSDMAP_NOSNAPTRIM (1<<21) /* disable snap trimming */ #define CEPH_OSDMAP_PGLOG_HARDLIMIT (1<<22) /* put a hard limit on pg log length */ +#define CEPH_OSDMAP_NOAUTOSCALE (1<<23) /* block pg autoscale */ /* these are hidden in 'ceph status' view */ #define CEPH_OSDMAP_SEMIHIDDEN_FLAGS (CEPH_OSDMAP_REQUIRE_JEWEL| \ diff --git a/ceph/src/include/win32/fs_compat.h b/ceph/src/include/win32/fs_compat.h index 318c8fab7..deeedf071 100644 --- a/ceph/src/include/win32/fs_compat.h +++ b/ceph/src/include/win32/fs_compat.h @@ -42,3 +42,6 @@ #define XATTR_CREATE 1 #define XATTR_REPLACE 2 + +typedef unsigned int uid_t; +typedef unsigned int gid_t; diff --git a/ceph/src/libcephfs.cc b/ceph/src/libcephfs.cc index 99da0c5c5..51e73efdb 100644 --- a/ceph/src/libcephfs.cc +++ b/ceph/src/libcephfs.cc @@ -19,6 +19,7 @@ #include "auth/Crypto.h" #include "client/Client.h" +#include "client/Inode.h" #include "librados/RadosClient.h" #include "common/async/context_pool.h" #include "common/ceph_argparse.h" @@ -28,6 +29,7 @@ #include "mon/MonClient.h" #include "include/str_list.h" #include "include/stringify.h" +#include "include/object.h" #include "messages/MMonMap.h" #include "msg/Messenger.h" #include "include/ceph_assert.h" @@ -687,6 +689,124 @@ extern "C" int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_di return cmount->get_client()->readdirplus_r(reinterpret_cast(dirp), de, stx, want, flags, out); } +extern "C" int ceph_open_snapdiff(struct ceph_mount_info* cmount, + const char* root_path, + const char* rel_path, + const char* snap1, + const char* snap2, + struct ceph_snapdiff_info* out) +{ + if (!cmount->is_mounted()) { + /* we set errno to signal errors. 
*/ + errno = ENOTCONN; + return -errno; + } + if (!out || !root_path || !rel_path || + !snap1 || !*snap1 || !snap2 || !*snap2) { + errno = EINVAL; + return -errno; + } + out->cmount = cmount; + out->dir1 = out->dir_aux = nullptr; + + char full_path1[PATH_MAX]; + char snapdir[PATH_MAX]; + cmount->conf_get("client_snapdir", snapdir, sizeof(snapdir) - 1); + int n = snprintf(full_path1, PATH_MAX, + "%s/%s/%s/%s", root_path, snapdir, snap1, rel_path); + if (n < 0 || n == PATH_MAX) { + errno = ENAMETOOLONG; + return -errno; + } + char full_path2[PATH_MAX]; + n = snprintf(full_path2, PATH_MAX, + "%s/%s/%s/%s", root_path, snapdir, snap2, rel_path); + if (n < 0 || n == PATH_MAX) { + errno = ENAMETOOLONG; + return -errno; + } + + int r = ceph_opendir(cmount, full_path1, &(out->dir1)); + if (r != 0) { + //it's OK to have one of the snap paths absent - attempting another one + r = ceph_opendir(cmount, full_path2, &(out->dir1)); + if (r != 0) { + // both snaps are absent, giving up + errno = ENOENT; + return -errno; + } + std::swap(snap1, snap2); // will use snap1 to learn snap_other below + } else { + // trying to open second snapshot to learn snapid and + // get the entry loaded into the client cache if any. + r = ceph_opendir(cmount, full_path2, &(out->dir_aux)); + //paranoic, rely on this value below + out->dir_aux = r == 0 ? out->dir_aux : nullptr; + } + if (!out->dir_aux) { + // now trying to learn the second snapshot's id by using snapshot's root + n = snprintf(full_path2, PATH_MAX, + "%s/%s/%s", root_path, snapdir, snap2); + ceph_assert(n > 0 && n < PATH_MAX); //we've already checked above + //that longer string fits. + // Hence unlikely to assert + r = ceph_opendir(cmount, full_path2, &(out->dir_aux)); + if (r != 0) { + goto close_err; + } + } + return 0; + +close_err: + ceph_close_snapdiff(out); + return r; +} + +extern "C" int ceph_readdir_snapdiff(struct ceph_snapdiff_info* snapdiff, + struct ceph_snapdiff_entry_t* out) +{ + if (!snapdiff->cmount->is_mounted()) { + /* also sets errno to signal errors. */ + errno = ENOTCONN; + return -errno; + } + dir_result_t* d1 = reinterpret_cast(snapdiff->dir1); + dir_result_t* d2 = reinterpret_cast(snapdiff->dir_aux); + if (!d1 || !d2 || !d1->inode || !d2->inode) { + errno = EINVAL; + return -errno; + } + snapid_t snapid; + int r = snapdiff->cmount->get_client()->readdir_snapdiff( + d1, + d2->inode->snapid, + &(out->dir_entry), + &snapid); + if (r >= 0) { + // converting snapid_t to uint64_t to avoid snapid_t exposure + out->snapid = snapid; + } + return r; +} + +extern "C" int ceph_close_snapdiff(struct ceph_snapdiff_info* snapdiff) +{ + if (!snapdiff->cmount || !snapdiff->cmount->is_mounted()) { + /* also sets errno to signal errors. */ + errno = ENOTCONN; + return -errno; + } + if (snapdiff->dir_aux) { + ceph_closedir(snapdiff->cmount, snapdiff->dir_aux); + } + if (snapdiff->dir1) { + ceph_closedir(snapdiff->cmount, snapdiff->dir1); + } + snapdiff->cmount = nullptr; + snapdiff->dir1 = snapdiff->dir_aux = nullptr; + return 0; +} + extern "C" int ceph_getdents(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, char *buf, int buflen) { diff --git a/ceph/src/libcephsqlite.cc b/ceph/src/libcephsqlite.cc index f533780c5..b4fb96841 100644 --- a/ceph/src/libcephsqlite.cc +++ b/ceph/src/libcephsqlite.cc @@ -54,9 +54,9 @@ SQLITE_EXTENSION_INIT1 #define dout_subsys ceph_subsys_cephsqlite #undef dout_prefix #define dout_prefix *_dout << "cephsqlite: " << __func__ << ": " -#define d(vfs,lvl) ldout(getcct(vfs), (lvl)) << "(client." 
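/*
 * Illustrative aside (not part of the upstream patch): a minimal sketch of
 * driving the new snapdiff C API whose declarations and implementation appear
 * above. Assumptions: a reachable cluster with a default ceph.conf/keyring,
 * a filesystem that already has two snapshots named "snap_a" and "snap_b"
 * covering "/dir", and abbreviated error handling. Only the three snapdiff
 * calls come from this patch; the mount calls are the long-standing
 * libcephfs ones.
 */
#include <cephfs/libcephfs.h>
#include <stdio.h>

static int dump_snapdiff_example(void)
{
  struct ceph_mount_info *cmount = NULL;
  if (ceph_create(&cmount, NULL) != 0)
    return -1;
  ceph_conf_read_file(cmount, NULL);      /* default config search path */
  if (ceph_mount(cmount, "/") != 0)
    return -1;

  struct ceph_snapdiff_info info;
  /* delta of "dir" (relative to the root) between snap_a and snap_b */
  int r = ceph_open_snapdiff(cmount, "/", "dir", "snap_a", "snap_b", &info);
  if (r == 0) {
    struct ceph_snapdiff_entry_t entry;
    /* >0: got an entry, 0: end of stream, <0: error */
    while ((r = ceph_readdir_snapdiff(&info, &entry)) > 0) {
      printf("%s (snapid %llu)\n", entry.dir_entry.d_name,
             (unsigned long long)entry.snapid);
    }
    ceph_close_snapdiff(&info);
  }
  ceph_unmount(cmount);
  ceph_release(cmount);
  return r;
}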
<< getdata(vfs).cluster.get_instance_id() << ") " -#define dv(lvl) d(vfs,(lvl)) -#define df(lvl) d(f->vfs,(lvl)) << f->loc << " " +#define d(cct,cluster,lvl) ldout((cct), (lvl)) << "(client." << cluster->get_instance_id() << ") " +#define dv(lvl) d(cct,cluster,(lvl)) +#define df(lvl) d(f->io.cct,f->io.cluster,(lvl)) << f->loc << " " enum { P_FIRST = 0xf0000, @@ -80,15 +80,21 @@ enum { P_LAST, }; +using cctptr = boost::intrusive_ptr; +using rsptr = std::shared_ptr; + struct cephsqlite_appdata { ~cephsqlite_appdata() { + { + std::scoped_lock lock(cluster_mutex); + _disconnect(); + } if (logger) { cct->get_perfcounters_collection()->remove(logger.get()); } if (striper_logger) { cct->get_perfcounters_collection()->remove(striper_logger.get()); } - cluster.shutdown(); } int setup_perf() { ceph_assert(cct); @@ -118,26 +124,96 @@ struct cephsqlite_appdata { cct->get_perfcounters_collection()->add(striper_logger.get()); return 0; } - int init_cluster() { + + std::pair get_cluster() { + std::scoped_lock lock(cluster_mutex); + if (!cct) { + if (int rc = _open(nullptr); rc < 0) { + ceph_abort("could not open connection to ceph"); + } + } + return {cct, cluster}; + } + int connect() { + std::scoped_lock lock(cluster_mutex); + return _connect(); + } + int reconnect() { + std::scoped_lock lock(cluster_mutex); + _disconnect(); + return _connect(); + } + int maybe_reconnect(rsptr _cluster) { + std::scoped_lock lock(cluster_mutex); + if (!cluster || cluster == _cluster) { + ldout(cct, 10) << "reconnecting to RADOS" << dendl; + _disconnect(); + return _connect(); + } else { + ldout(cct, 10) << "already reconnected" << dendl; + return 0; + } + } + int open(CephContext* _cct) { + std::scoped_lock lock(cluster_mutex); + return _open(_cct); + } + + std::unique_ptr logger; + std::shared_ptr striper_logger; + +private: + int _open(CephContext* _cct) { + if (!_cct) { + std::vector env_args; + env_to_vec(env_args, "CEPH_ARGS"); + std::string cluster, conf_file_list; // unused + CephInitParameters iparams = ceph_argparse_early_args(env_args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list); + cct = cctptr(common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0), false); + cct->_conf.parse_config_files(nullptr, &std::cerr, 0); + cct->_conf.parse_env(cct->get_module_type()); // environment variables override + cct->_conf.apply_changes(nullptr); + common_init_finish(cct.get()); + } else { + cct = cctptr(_cct); + } + + if (int rc = setup_perf(); rc < 0) { + return rc; + } + + if (int rc = _connect(); rc < 0) { + return rc; + } + + return 0; + } + void _disconnect() { + if (cluster) { + cluster.reset(); + } + } + int _connect() { ceph_assert(cct); + auto _cluster = rsptr(new librados::Rados()); ldout(cct, 5) << "initializing RADOS handle as " << cct->_conf->name << dendl; - if (int rc = cluster.init_with_context(cct.get()); rc < 0) { + if (int rc = _cluster->init_with_context(cct.get()); rc < 0) { lderr(cct) << "cannot initialize RADOS: " << cpp_strerror(rc) << dendl; return rc; } - if (int rc = cluster.connect(); rc < 0) { + if (int rc = _cluster->connect(); rc < 0) { lderr(cct) << "cannot connect: " << cpp_strerror(rc) << dendl; return rc; } - auto s = cluster.get_addrs(); + auto s = _cluster->get_addrs(); ldout(cct, 5) << "completed connection to RADOS with address " << s << dendl; + cluster = std::move(_cluster); return 0; } - boost::intrusive_ptr cct; - std::unique_ptr logger; - std::shared_ptr striper_logger; - librados::Rados cluster; + ceph::mutex cluster_mutex = ceph::make_mutex("libcephsqlite");; + cctptr cct; + 
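  // Editorial note (not part of the upstream patch): with this rework the
  // RADOS handle is created lazily through get_cluster()/_open() instead of
  // at library load time, and each VFS I/O path below (Lock, Unlock, Read,
  // Write, Truncate, Sync, FileSize) now calls maybe_reconnect() when an
  // operation fails with -EBLOCKLISTED. maybe_reconnect() only rebuilds the
  // handle if the caller's rsptr still matches the current one, so several
  // files failing at once trigger a single reconnect rather than one each.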
rsptr cluster; }; struct cephsqlite_fileloc { @@ -147,6 +223,8 @@ struct cephsqlite_fileloc { }; struct cephsqlite_fileio { + cctptr cct; + rsptr cluster; // anchor for ioctx librados::IoCtx ioctx; std::unique_ptr rs; }; @@ -176,36 +254,6 @@ struct cephsqlite_file { #define getdata(vfs) (*((cephsqlite_appdata*)((vfs)->pAppData))) -static CephContext* getcct(sqlite3_vfs* vfs) -{ - auto&& appd = getdata(vfs); - auto& cct = appd.cct; - if (cct) { - return cct.get(); - } - - /* bootstrap cct */ - std::vector env_args; - env_to_vec(env_args, "CEPH_ARGS"); - std::string cluster, conf_file_list; // unused - CephInitParameters iparams = ceph_argparse_early_args(env_args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list); - cct = boost::intrusive_ptr(common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0), false); - cct->_conf.parse_config_files(nullptr, &std::cerr, 0); - cct->_conf.parse_env(cct->get_module_type()); // environment variables override - cct->_conf.apply_changes(nullptr); - common_init_finish(cct.get()); - - if (int rc = appd.setup_perf(); rc < 0) { - ceph_abort("cannot setup perf counters"); - } - - if (int rc = appd.init_cluster(); rc < 0) { - ceph_abort("cannot setup RADOS cluster handle"); - } - - return cct.get(); -} - static int Lock(sqlite3_file *file, int ilock) { auto f = (cephsqlite_file*)file; @@ -218,6 +266,9 @@ static int Lock(sqlite3_file *file, int ilock) if (!f->io.rs->is_locked() && ilock > SQLITE_LOCK_NONE) { if (int rc = f->io.rs->lock(0); rc < 0) { df(5) << "failed: " << rc << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_IOERR; } } @@ -240,6 +291,9 @@ static int Unlock(sqlite3_file *file, int ilock) if (ilock <= SQLITE_LOCK_NONE && SQLITE_LOCK_NONE < lock) { if (int rc = f->io.rs->unlock(); rc < 0) { df(5) << "failed: " << rc << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_IOERR; } } @@ -290,13 +344,16 @@ static int Read(sqlite3_file *file, void *buf, int len, sqlite_int64 off) if (int rc = f->io.rs->read(buf, len, off); rc < 0) { df(5) << "read failed: " << cpp_strerror(rc) << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_IOERR_READ; } else { df(5) << "= " << rc << dendl; auto end = ceph::coarse_mono_clock::now(); getdata(f->vfs).logger->tinc(P_OPF_READ, end-start); if (rc < len) { - memset(buf, 0, len-rc); + memset((unsigned char*)buf+rc, 0, len-rc); return SQLITE_IOERR_SHORT_READ; } else { return SQLITE_OK; @@ -312,6 +369,9 @@ static int Write(sqlite3_file *file, const void *buf, int len, sqlite_int64 off) if (int rc = f->io.rs->write(buf, len, off); rc < 0) { df(5) << "write failed: " << cpp_strerror(rc) << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_IOERR_WRITE; } else { df(5) << "= " << rc << dendl; @@ -330,6 +390,9 @@ static int Truncate(sqlite3_file *file, sqlite_int64 size) if (int rc = f->io.rs->truncate(size); rc < 0) { df(5) << "truncate failed: " << cpp_strerror(rc) << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_IOERR; } @@ -346,6 +409,9 @@ static int Sync(sqlite3_file *file, int flags) if (int rc = f->io.rs->flush(); rc < 0) { df(5) << "failed: " << cpp_strerror(rc) << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_IOERR; } @@ -366,6 +432,9 @@ static int FileSize(sqlite3_file *file, sqlite_int64 *osize) 
uint64_t size = 0; if (int rc = f->io.rs->stat(&size); rc < 0) { df(5) << "stat failed: " << cpp_strerror(rc) << dendl; + if (rc == -EBLOCKLISTED) { + getdata(f->vfs).maybe_reconnect(f->io.cluster); + } return SQLITE_NOTFOUND; } @@ -397,37 +466,34 @@ static bool parsepath(std::string_view path, struct cephsqlite_fileloc* fileloc) return true; } -static int makestriper(sqlite3_vfs* vfs, const cephsqlite_fileloc& loc, cephsqlite_fileio* io) +static int makestriper(sqlite3_vfs* vfs, cctptr cct, rsptr cluster, const cephsqlite_fileloc& loc, cephsqlite_fileio* io) { - auto&& appd = getdata(vfs); - auto& cct = appd.cct; - auto& cluster = appd.cluster; bool gotmap = false; - dv(10) << loc << dendl; + d(cct,cluster,10) << loc << dendl; enoent_retry: if (loc.pool[0] == '*') { std::string err; int64_t id = strict_strtoll(loc.pool.c_str()+1, 10, &err); ceph_assert(err.empty()); - if (int rc = cluster.ioctx_create2(id, io->ioctx); rc < 0) { + if (int rc = cluster->ioctx_create2(id, io->ioctx); rc < 0) { if (rc == -ENOENT && !gotmap) { - cluster.wait_for_latest_osdmap(); + cluster->wait_for_latest_osdmap(); gotmap = true; goto enoent_retry; } - dv(10) << "cannot create ioctx: " << cpp_strerror(rc) << dendl; + d(cct,cluster,1) << "cannot create ioctx: " << cpp_strerror(rc) << dendl; return rc; } } else { - if (int rc = cluster.ioctx_create(loc.pool.c_str(), io->ioctx); rc < 0) { + if (int rc = cluster->ioctx_create(loc.pool.c_str(), io->ioctx); rc < 0) { if (rc == -ENOENT && !gotmap) { - cluster.wait_for_latest_osdmap(); + cluster->wait_for_latest_osdmap(); gotmap = true; goto enoent_retry; } - dv(10) << "cannot create ioctx: " << cpp_strerror(rc) << dendl; + d(cct,cluster,1) << "cannot create ioctx: " << cpp_strerror(rc) << dendl; return rc; } } @@ -436,10 +502,12 @@ enoent_retry: io->ioctx.set_namespace(loc.radosns); io->rs = std::make_unique(io->ioctx, loc.name); - io->rs->set_logger(appd.striper_logger); + io->rs->set_logger(getdata(vfs).striper_logger); io->rs->set_lock_timeout(cct->_conf.get_val("cephsqlite_lock_renewal_timeout")); io->rs->set_lock_interval(cct->_conf.get_val("cephsqlite_lock_renewal_interval")); io->rs->set_blocklist_the_dead(cct->_conf.get_val("cephsqlite_blocklist_dead_locker")); + io->cluster = std::move(cluster); + io->cct = cct; return 0; } @@ -502,7 +570,7 @@ static int Open(sqlite3_vfs *vfs, const char *name, sqlite3_file *file, auto start = ceph::coarse_mono_clock::now(); bool gotmap = false; - auto& cluster = getdata(vfs).cluster; + auto [cct, cluster] = getdata(vfs).get_cluster(); /* we are not going to create temporary files */ if (name == NULL) { @@ -525,9 +593,9 @@ static int Open(sqlite3_vfs *vfs, const char *name, sqlite3_file *file, f->flags = flags; enoent_retry: - if (int rc = makestriper(vfs, f->loc, &f->io); rc < 0) { + if (int rc = makestriper(vfs, cct, cluster, f->loc, &f->io); rc < 0) { f->~cephsqlite_file(); - dv(5) << "cannot open striper" << dendl; + dv(-1) << "cannot open striper" << dendl; return SQLITE_IOERR; } @@ -540,7 +608,7 @@ enoent_retry: * in testing when pools are getting created/deleted left and right. */ dv(5) << "retrying create after getting latest OSDMap" << dendl; - cluster.wait_for_latest_osdmap(); + cluster->wait_for_latest_osdmap(); gotmap = true; goto enoent_retry; } @@ -553,7 +621,7 @@ enoent_retry: if (rc == -ENOENT && !gotmap) { /* See comment above for create case. 
*/ dv(5) << "retrying open after getting latest OSDMap" << dendl; - cluster.wait_for_latest_osdmap(); + cluster->wait_for_latest_osdmap(); gotmap = true; goto enoent_retry; } @@ -578,6 +646,7 @@ enoent_retry: static int Delete(sqlite3_vfs* vfs, const char* path, int dsync) { auto start = ceph::coarse_mono_clock::now(); + auto [cct, cluster] = getdata(vfs).get_cluster(); dv(5) << "'" << path << "', " << dsync << dendl; cephsqlite_fileloc fileloc; @@ -587,8 +656,8 @@ static int Delete(sqlite3_vfs* vfs, const char* path, int dsync) } cephsqlite_fileio io; - if (int rc = makestriper(vfs, fileloc, &io); rc < 0) { - dv(5) << "cannot open striper" << dendl; + if (int rc = makestriper(vfs, cct, cluster, fileloc, &io); rc < 0) { + dv(-1) << "cannot open striper" << dendl; return SQLITE_IOERR; } @@ -616,6 +685,7 @@ static int Delete(sqlite3_vfs* vfs, const char* path, int dsync) static int Access(sqlite3_vfs* vfs, const char* path, int flags, int* result) { auto start = ceph::coarse_mono_clock::now(); + auto [cct, cluster] = getdata(vfs).get_cluster(); dv(5) << path << " " << std::hex << flags << dendl; cephsqlite_fileloc fileloc; @@ -625,8 +695,8 @@ static int Access(sqlite3_vfs* vfs, const char* path, int flags, int* result) } cephsqlite_fileio io; - if (int rc = makestriper(vfs, fileloc, &io); rc < 0) { - dv(5) << "cannot open striper" << dendl; + if (int rc = makestriper(vfs, cct, cluster, fileloc, &io); rc < 0) { + dv(-1) << "cannot open striper" << dendl; return SQLITE_IOERR; } @@ -662,7 +732,7 @@ static int FullPathname(sqlite3_vfs* vfs, const char* ipath, int opathlen, char* { auto start = ceph::coarse_mono_clock::now(); auto path = std::string_view(ipath); - + auto [cct, cluster] = getdata(vfs).get_cluster(); dv(5) << "1: " << path << dendl; cephsqlite_fileloc fileloc; @@ -688,6 +758,7 @@ static int FullPathname(sqlite3_vfs* vfs, const char* ipath, int opathlen, char* static int CurrentTime(sqlite3_vfs* vfs, sqlite3_int64* time) { auto start = ceph::coarse_mono_clock::now(); + auto [cct, cluster] = getdata(vfs).get_cluster(); dv(5) << time << dendl; auto t = ceph_clock_now(); @@ -698,33 +769,29 @@ static int CurrentTime(sqlite3_vfs* vfs, sqlite3_int64* time) return SQLITE_OK; } -LIBCEPHSQLITE_API int cephsqlite_setcct(CephContext* cct, char** ident) +LIBCEPHSQLITE_API int cephsqlite_setcct(CephContext* _cct, char** ident) { - ldout(cct, 1) << "cct: " << cct << dendl; + ldout(_cct, 1) << "cct: " << _cct << dendl; if (sqlite3_api == nullptr) { - lderr(cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl; + lderr(_cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl; return -EINVAL; } auto vfs = sqlite3_vfs_find("ceph"); if (!vfs) { - lderr(cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl; + lderr(_cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl; return -EINVAL; } auto& appd = getdata(vfs); - appd.cct = cct; - if (int rc = appd.setup_perf(); rc < 0) { - appd.cct = nullptr; - return rc; - } - if (int rc = appd.init_cluster(); rc < 0) { - appd.cct = nullptr; + if (int rc = appd.open(_cct); rc < 0) { return rc; } - auto s = appd.cluster.get_addrs(); + auto [cct, cluster] = appd.get_cluster(); + + auto s = cluster->get_addrs(); if (ident) { *ident = strdup(s.c_str()); } @@ -737,6 +804,7 @@ LIBCEPHSQLITE_API int cephsqlite_setcct(CephContext* cct, char** ident) static void f_perf(sqlite3_context* ctx, int argc, sqlite3_value** argv) { auto vfs = (sqlite3_vfs*)sqlite3_user_data(ctx); + auto [cct, cluster] = 
getdata(vfs).get_cluster(); dv(10) << dendl; auto&& appd = getdata(vfs); JSONFormatter f(false); @@ -756,12 +824,12 @@ static void f_perf(sqlite3_context* ctx, int argc, sqlite3_value** argv) static void f_status(sqlite3_context* ctx, int argc, sqlite3_value** argv) { auto vfs = (sqlite3_vfs*)sqlite3_user_data(ctx); + auto [cct, cluster] = getdata(vfs).get_cluster(); dv(10) << dendl; - auto&& appd = getdata(vfs); JSONFormatter f(false); f.open_object_section("ceph_status"); - f.dump_int("id", appd.cluster.get_instance_id()); - f.dump_string("addr", appd.cluster.get_addrs()); + f.dump_int("id", cluster->get_instance_id()); + f.dump_string("addr", cluster->get_addrs()); f.close_section(); { CachedStackStringStream css; diff --git a/ceph/src/librbd/ImageWatcher.cc b/ceph/src/librbd/ImageWatcher.cc index 08159d270..fbb4c8339 100644 --- a/ceph/src/librbd/ImageWatcher.cc +++ b/ceph/src/librbd/ImageWatcher.cc @@ -578,8 +578,7 @@ void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { return; } - std::shared_lock watch_locker{this->m_watch_lock}; - if (this->is_registered(this->m_watch_lock)) { + if (is_registered()) { ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl; auto ctx = new LambdaContext([this](int r) { diff --git a/ceph/src/librbd/ManagedLock.cc b/ceph/src/librbd/ManagedLock.cc index 8a05957ec..166a31c61 100644 --- a/ceph/src/librbd/ManagedLock.cc +++ b/ceph/src/librbd/ManagedLock.cc @@ -207,7 +207,8 @@ void ManagedLock::reacquire_lock(Context *on_reacquired) { { std::lock_guard locker{m_lock}; - if (m_state == STATE_WAITING_FOR_REGISTER) { + if (m_state == STATE_WAITING_FOR_REGISTER || + m_state == STATE_WAITING_FOR_LOCK) { // restart the acquire lock process now that watch is valid ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl; Action active_action = get_active_action(); @@ -217,8 +218,7 @@ void ManagedLock::reacquire_lock(Context *on_reacquired) { } else if (!is_state_shutdown() && (m_state == STATE_LOCKED || m_state == STATE_ACQUIRING || - m_state == STATE_POST_ACQUIRING || - m_state == STATE_WAITING_FOR_LOCK)) { + m_state == STATE_POST_ACQUIRING)) { // interlock the lock operation with other state ops ldout(m_cct, 10) << dendl; execute_action(ACTION_REACQUIRE_LOCK, on_reacquired); diff --git a/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc b/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc index fd6f9b502..c8e3a4fe7 100644 --- a/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc +++ b/ceph/src/librbd/mirror/snapshot/CreatePrimaryRequest.cc @@ -177,6 +177,7 @@ void CreatePrimaryRequest::handle_refresh_image(int r) { template void CreatePrimaryRequest::unlink_peer() { + // TODO: Document semantics for unlink_peer uint64_t max_snapshots = m_image_ctx->config.template get_val( "rbd_mirroring_max_mirroring_snapshots"); ceph_assert(max_snapshots >= 3); @@ -184,55 +185,61 @@ void CreatePrimaryRequest::unlink_peer() { std::string peer_uuid; uint64_t snap_id = CEPH_NOSNAP; - for (auto &peer : m_mirror_peer_uuids) { + { std::shared_lock image_locker{m_image_ctx->image_lock}; - size_t count = 0; - uint64_t unlink_snap_id = 0; - for (auto &snap_it : m_image_ctx->snap_info) { - auto info = std::get_if( - &snap_it.second.snap_namespace); - if (info == nullptr) { - continue; - } - if (info->state != cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY) { - // reset counters -- we count primary snapshots after the last promotion - count = 0; - unlink_snap_id = 0; - continue; - } - // call UnlinkPeerRequest only if the snapshot is 
linked with this peer - // or if it's not linked with any peer (happens if mirroring is enabled - // on a pool with no peers configured or if UnlinkPeerRequest gets - // interrupted) - if (!info->mirror_peer_uuids.empty() && - info->mirror_peer_uuids.count(peer) == 0) { - continue; - } - if (info->mirror_peer_uuids.empty() || !info->complete) { - peer_uuid = peer; - snap_id = snap_it.first; - break; - } - count++; - if (count == max_snapshots) { - unlink_snap_id = snap_it.first; - } - if (count > max_snapshots) { - peer_uuid = peer; - snap_id = unlink_snap_id; - break; + for (const auto& peer : m_mirror_peer_uuids) { + for (const auto& snap_info_pair : m_image_ctx->snap_info) { + auto info = std::get_if( + &snap_info_pair.second.snap_namespace); + if (info == nullptr) { + continue; + } + if (info->mirror_peer_uuids.empty() || + (info->mirror_peer_uuids.count(peer) != 0 && + info->is_primary() && !info->complete)) { + peer_uuid = peer; + snap_id = snap_info_pair.first; + goto do_unlink; + } } } - if (snap_id != CEPH_NOSNAP) { - break; + for (const auto& peer : m_mirror_peer_uuids) { + size_t count = 0; + uint64_t unlink_snap_id = 0; + for (const auto& snap_info_pair : m_image_ctx->snap_info) { + auto info = std::get_if( + &snap_info_pair.second.snap_namespace); + if (info == nullptr) { + continue; + } + if (info->state != cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY) { + // reset counters -- we count primary snapshots after the last + // promotion + count = 0; + unlink_snap_id = 0; + continue; + } + if (info->mirror_peer_uuids.count(peer) == 0) { + // snapshot is not linked with this peer + continue; + } + count++; + if (count == max_snapshots) { + unlink_snap_id = snap_info_pair.first; + } + if (count > max_snapshots) { + peer_uuid = peer; + snap_id = unlink_snap_id; + goto do_unlink; + } + } } } - if (snap_id == CEPH_NOSNAP) { - finish(0); - return; - } + finish(0); + return; +do_unlink: CephContext *cct = m_image_ctx->cct; ldout(cct, 15) << "peer=" << peer_uuid << ", snap_id=" << snap_id << dendl; diff --git a/ceph/src/librbd/operation/SnapshotRemoveRequest.cc b/ceph/src/librbd/operation/SnapshotRemoveRequest.cc index cc975d176..f3b4dc62e 100644 --- a/ceph/src/librbd/operation/SnapshotRemoveRequest.cc +++ b/ceph/src/librbd/operation/SnapshotRemoveRequest.cc @@ -355,9 +355,10 @@ void SnapshotRemoveRequest::handle_remove_object_map(int r) { template void SnapshotRemoveRequest::remove_image_state() { I &image_ctx = this->m_image_ctx; - auto type = cls::rbd::get_snap_namespace_type(m_snap_namespace); - if (type != cls::rbd::SNAPSHOT_NAMESPACE_TYPE_MIRROR) { + const auto* info = std::get_if( + &m_snap_namespace); + if (info == nullptr || info->is_orphan()) { release_snap_id(); return; } diff --git a/ceph/src/mds/BatchOp.h b/ceph/src/mds/BatchOp.h index bc4e21bce..5277c3d69 100644 --- a/ceph/src/mds/BatchOp.h +++ b/ceph/src/mds/BatchOp.h @@ -27,7 +27,7 @@ public: virtual void add_request(const ceph::ref_t& mdr) = 0; virtual ceph::ref_t find_new_head() = 0; - virtual void print(std::ostream&) = 0; + virtual void print(std::ostream&) const = 0; void forward(mds_rank_t target); void respond(int r); diff --git a/ceph/src/mds/CDentry.cc b/ceph/src/mds/CDentry.cc index 6f20f53d1..b6d169b9e 100644 --- a/ceph/src/mds/CDentry.cc +++ b/ceph/src/mds/CDentry.cc @@ -33,7 +33,7 @@ using namespace std; -ostream& CDentry::print_db_line_prefix(ostream& out) +ostream& CDentry::print_db_line_prefix(ostream& out) const { return out << ceph_clock_now() << " mds." 
<< dir->mdcache->mds->get_nodeid() << ".cache.den(" << dir->ino() << " " << name << ") "; } @@ -108,8 +108,6 @@ ostream& operator<<(ostream& out, const CDentry& dn) out << " state=" << dn.get_state(); if (dn.is_new()) out << "|new"; if (dn.state_test(CDentry::STATE_BOTTOMLRU)) out << "|bottomlru"; - if (dn.state_test(CDentry::STATE_UNLINKING)) out << "|unlinking"; - if (dn.state_test(CDentry::STATE_REINTEGRATING)) out << "|reintegrating"; if (dn.get_num_ref()) { out << " |"; @@ -137,7 +135,7 @@ bool operator<(const CDentry& l, const CDentry& r) } -void CDentry::print(ostream& out) +void CDentry::print(ostream& out) const { out << *this; } @@ -716,7 +714,7 @@ bool CDentry::check_corruption(bool load) } if (!load && g_conf().get_val("mds_abort_on_newly_corrupt_dentry")) { dir->mdcache->mds->clog->error() << "MDS abort because newly corrupt dentry to be committed: " << *this; - ceph_abort("detected newly corrupt dentry"); /* avoid writing out newly corrupted dn */ + dir->mdcache->mds->abort("detected newly corrupt dentry"); /* avoid writing out newly corrupted dn */ } return true; } diff --git a/ceph/src/mds/CDentry.h b/ceph/src/mds/CDentry.h index c4acf8768..4cbf24f0c 100644 --- a/ceph/src/mds/CDentry.h +++ b/ceph/src/mds/CDentry.h @@ -29,7 +29,6 @@ #include "BatchOp.h" #include "MDSCacheObject.h" #include "MDSContext.h" -#include "Mutation.h" #include "SimpleLock.h" #include "LocalLockC.h" #include "ScrubHeader.h" @@ -87,25 +86,18 @@ public: static const int STATE_EVALUATINGSTRAY = (1<<4); static const int STATE_PURGINGPINNED = (1<<5); static const int STATE_BOTTOMLRU = (1<<6); - static const int STATE_UNLINKING = (1<<7); - static const int STATE_REINTEGRATING = (1<<8); // stray dentry needs notification of releasing reference static const int STATE_STRAY = STATE_NOTIFYREF; static const int MASK_STATE_IMPORT_KEPT = STATE_BOTTOMLRU; // -- pins -- - static const int PIN_INODEPIN = 1; // linked inode is pinned - static const int PIN_FRAGMENTING = -2; // containing dir is refragmenting - static const int PIN_PURGING = 3; - static const int PIN_SCRUBPARENT = 4; - static const int PIN_WAITUNLINKSTATE = 5; + static const int PIN_INODEPIN = 1; // linked inode is pinned + static const int PIN_FRAGMENTING = -2; // containing dir is refragmenting + static const int PIN_PURGING = 3; + static const int PIN_SCRUBPARENT = 4; static const unsigned EXPORT_NONCE = 1; - const static uint64_t WAIT_UNLINK_STATE = (1<<0); - const static uint64_t WAIT_UNLINK_FINISH = (1<<1); - const static uint64_t WAIT_REINTEGRATE_FINISH = (1<<2); - uint32_t replica_unlinking_ref = 0; CDentry(std::string_view n, __u32 h, mempool::mds_co::string alternate_name, @@ -144,7 +136,6 @@ public: case PIN_FRAGMENTING: return "fragmenting"; case PIN_PURGING: return "purging"; case PIN_SCRUBPARENT: return "scrubparent"; - case PIN_WAITUNLINKSTATE: return "waitunlinkstate"; default: return generic_pin_name(p); } } @@ -358,8 +349,8 @@ public: void remove_client_lease(ClientLease *r, Locker *locker); // returns remaining mask (if any), and kicks locker eval_gathers void remove_client_leases(Locker *locker); - std::ostream& print_db_line_prefix(std::ostream& out) override; - void print(std::ostream& out) override; + std::ostream& print_db_line_prefix(std::ostream& out) const override; + void print(std::ostream& out) const override; void dump(ceph::Formatter *f) const; static void encode_remote(inodeno_t& ino, unsigned char d_type, diff --git a/ceph/src/mds/CDir.cc b/ceph/src/mds/CDir.cc index ca0c2ec17..0484c38cc 100644 --- 
a/ceph/src/mds/CDir.cc +++ b/ceph/src/mds/CDir.cc @@ -180,7 +180,7 @@ ostream& operator<<(ostream& out, const CDir& dir) } -void CDir::print(ostream& out) +void CDir::print(ostream& out) const { out << *this; } @@ -188,7 +188,7 @@ void CDir::print(ostream& out) -ostream& CDir::print_db_line_prefix(ostream& out) +ostream& CDir::print_db_line_prefix(ostream& out) const { return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.dir(" << this->dirfrag() << ") "; } diff --git a/ceph/src/mds/CDir.h b/ceph/src/mds/CDir.h index 58507db97..7cc4dc7ff 100644 --- a/ceph/src/mds/CDir.h +++ b/ceph/src/mds/CDir.h @@ -611,8 +611,8 @@ public: } void enable_frozen_inode(); - std::ostream& print_db_line_prefix(std::ostream& out) override; - void print(std::ostream& out) override; + std::ostream& print_db_line_prefix(std::ostream& out) const override; + void print(std::ostream& out) const override; void dump(ceph::Formatter *f, int flags = DUMP_DEFAULT) const; void dump_load(ceph::Formatter *f); diff --git a/ceph/src/mds/CInode.cc b/ceph/src/mds/CInode.cc index e6cd03442..23cb087c8 100644 --- a/ceph/src/mds/CInode.cc +++ b/ceph/src/mds/CInode.cc @@ -131,7 +131,7 @@ std::string_view CInode::pin_name(int p) const } //int cinode_pins[CINODE_NUM_PINS]; // counts -ostream& CInode::print_db_line_prefix(ostream& out) +ostream& CInode::print_db_line_prefix(ostream& out) const { return out << ceph_clock_now() << " mds." << mdcache->mds->get_nodeid() << ".cache.ino(" << ino() << ") "; } @@ -338,7 +338,7 @@ CInode::CInode(MDCache *c, bool auth, snapid_t f, snapid_t l) : state_set(STATE_AUTH); } -void CInode::print(ostream& out) +void CInode::print(ostream& out) const { out << *this; } diff --git a/ceph/src/mds/CInode.h b/ceph/src/mds/CInode.h index 2f7d6d242..979b45174 100644 --- a/ceph/src/mds/CInode.h +++ b/ceph/src/mds/CInode.h @@ -398,8 +398,7 @@ class CInode : public MDSCacheObject, public InodeStoreBase, public Counterget_client() : client_t(-1); } +int Capability::confirm_receipt(ceph_seq_t seq, unsigned caps) { + int was_revoking = (_issued & ~_pending); + if (seq == last_sent) { + _revokes.clear(); + _issued = caps; + // don't add bits + _pending &= caps; + + // if the revoking is not totally finished just add the + // new revoking caps back. + if (was_revoking && revoking()) { + CInode *in = get_inode(); + dout(10) << "revocation is not totally finished yet on " << *in + << ", the session " << *session << dendl; + _revokes.emplace_back(_pending, last_sent, last_issue); + if (!is_notable()) + mark_notable(); + } + } else { + // can i forget any revocations? + while (!_revokes.empty() && _revokes.front().seq < seq) + _revokes.pop_front(); + if (!_revokes.empty()) { + if (_revokes.front().seq == seq) + _revokes.begin()->before = caps; + calc_issued(); + } else { + // seq < last_sent + _issued = caps | _pending; + } + } + + if (was_revoking && _issued == _pending) { + item_revoking_caps.remove_myself(); + item_client_revoking_caps.remove_myself(); + maybe_clear_notable(); + } + return was_revoking & ~_issued; // return revoked +} + bool Capability::is_stale() const { return session ? 
session->is_stale() : false; diff --git a/ceph/src/mds/Capability.h b/ceph/src/mds/Capability.h index f7119f002..3fd6d2ce6 100644 --- a/ceph/src/mds/Capability.h +++ b/ceph/src/mds/Capability.h @@ -182,34 +182,7 @@ public: inc_last_seq(); return last_sent; } - int confirm_receipt(ceph_seq_t seq, unsigned caps) { - int was_revoking = (_issued & ~_pending); - if (seq == last_sent) { - _revokes.clear(); - _issued = caps; - // don't add bits - _pending &= caps; - } else { - // can i forget any revocations? - while (!_revokes.empty() && _revokes.front().seq < seq) - _revokes.pop_front(); - if (!_revokes.empty()) { - if (_revokes.front().seq == seq) - _revokes.begin()->before = caps; - calc_issued(); - } else { - // seq < last_sent - _issued = caps | _pending; - } - } - - if (was_revoking && _issued == _pending) { - item_revoking_caps.remove_myself(); - item_client_revoking_caps.remove_myself(); - maybe_clear_notable(); - } - return was_revoking & ~_issued; // return revoked - } + int confirm_receipt(ceph_seq_t seq, unsigned caps); // we may get a release racing with revocations, which means our revokes will be ignored // by the client. clean them out of our _revokes history so we don't wait on them. void clean_revoke_from(ceph_seq_t li) { diff --git a/ceph/src/mds/FSMap.h b/ceph/src/mds/FSMap.h index 5bf2f6b26..f57a4177a 100644 --- a/ceph/src/mds/FSMap.h +++ b/ceph/src/mds/FSMap.h @@ -205,8 +205,16 @@ public: void print(std::ostream& out) const; bool is_upgradeable() const { - return (mds_map.allows_standby_replay() && mds_map.get_num_in_mds() == 0) - || (!mds_map.allows_standby_replay() && mds_map.get_num_in_mds() <= 1); + bool asr = mds_map.allows_standby_replay(); + auto in_mds = mds_map.get_num_in_mds(); + auto up_mds = mds_map.get_num_up_mds(); + return + /* fs was "down" */ + (in_mds == 0) + /* max_mds was set to 1; asr must be disabled */ + || (!asr && in_mds == 1) + /* max_mds any value and all MDS were failed; asr must be disabled */ + || (!asr && up_mds == 0); } /** diff --git a/ceph/src/mds/FSMapUser.cc b/ceph/src/mds/FSMapUser.cc index 63a58acc8..1b58fdbb2 100644 --- a/ceph/src/mds/FSMapUser.cc +++ b/ceph/src/mds/FSMapUser.cc @@ -62,7 +62,7 @@ void FSMapUser::print(std::ostream& out) const out << " id " << p.second.cid << " name " << p.second.name << std::endl; } -void FSMapUser::print_summary(ceph::Formatter *f, std::ostream *out) +void FSMapUser::print_summary(ceph::Formatter *f, std::ostream *out) const { std::map by_rank; std::map by_state; diff --git a/ceph/src/mds/FSMapUser.h b/ceph/src/mds/FSMapUser.h index a0be8e714..96ac26d6d 100644 --- a/ceph/src/mds/FSMapUser.h +++ b/ceph/src/mds/FSMapUser.h @@ -46,7 +46,7 @@ public: void decode(ceph::buffer::list::const_iterator& bl); void print(std::ostream& out) const; - void print_summary(ceph::Formatter *f, std::ostream *out); + void print_summary(ceph::Formatter *f, std::ostream *out) const; static void generate_test_instances(std::list& ls); @@ -57,7 +57,7 @@ public: WRITE_CLASS_ENCODER_FEATURES(FSMapUser::fs_info_t) WRITE_CLASS_ENCODER_FEATURES(FSMapUser) -inline std::ostream& operator<<(std::ostream& out, FSMapUser& m) { +inline std::ostream& operator<<(std::ostream& out, const FSMapUser& m) { m.print_summary(NULL, &out); return out; } diff --git a/ceph/src/mds/Locker.cc b/ceph/src/mds/Locker.cc index 1112dd80a..5d7ec56f2 100644 --- a/ceph/src/mds/Locker.cc +++ b/ceph/src/mds/Locker.cc @@ -1984,7 +1984,6 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut) void Locker::_finish_xlock(SimpleLock *lock, client_t xlocker, 
bool *pneed_issue) { - ceph_assert(!lock->is_stable()); if (lock->get_type() != CEPH_LOCK_DN && lock->get_type() != CEPH_LOCK_ISNAP && lock->get_type() != CEPH_LOCK_IPOLICY && diff --git a/ceph/src/mds/MDCache.cc b/ceph/src/mds/MDCache.cc index a2aea7a49..2ea13155e 100644 --- a/ceph/src/mds/MDCache.cc +++ b/ceph/src/mds/MDCache.cc @@ -6037,8 +6037,6 @@ void MDCache::finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snap << realm->get_newest_seq() << " on " << *realm << dendl; auto snap = make_message(CEPH_SNAP_OP_UPDATE); snap->bl = mds->server->get_snap_trace(client, realm); - for (const auto& child : realm->open_children) - snap->split_realms.push_back(child->inode->ino()); updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple(snap)); } else { dout(10) << "finish_snaprealm_reconnect client." << client << " up to date" @@ -8235,10 +8233,6 @@ void MDCache::dispatch(const cref_t &m) case MSG_MDS_DENTRYUNLINK: handle_dentry_unlink(ref_cast(m)); break; - case MSG_MDS_DENTRYUNLINK_ACK: - handle_dentry_unlink_ack(ref_cast(m)); - break; - case MSG_MDS_FRAGMENTNOTIFY: handle_fragment_notify(ref_cast(m)); @@ -8674,7 +8668,7 @@ int MDCache::path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, // success. if (mds->logger) mds->logger->inc(l_mds_traverse_hit); dout(10) << "path_traverse finish on snapid " << snapid << dendl; - if (mdr) + if (mdr) ceph_assert(mdr->snapid == snapid); if (flags & MDS_TRAVERSE_RDLOCK_SNAP) @@ -9756,7 +9750,7 @@ void MDCache::request_forward(MDRequestRef& mdr, mds_rank_t who, int port) if (mdr->is_batch_head()) { mdr->release_batch_op()->forward(who); } else { - mds->forward_message_mds(mdr->release_client_request(), who); + mds->forward_message_mds(mdr, who); } if (mds->logger) mds->logger->inc(l_mds_forward); } else if (mdr->internal_op >= 0) { @@ -11227,8 +11221,7 @@ void MDCache::handle_dentry_link(const cref_t &m) // UNLINK -void MDCache::send_dentry_unlink(CDentry *dn, CDentry *straydn, - MDRequestRef& mdr, bool unlinking) +void MDCache::send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr) { dout(10) << __func__ << " " << *dn << dendl; // share unlink news with replicas @@ -11240,11 +11233,6 @@ void MDCache::send_dentry_unlink(CDentry *dn, CDentry *straydn, CInode *strayin = straydn->get_linkage()->get_inode(); strayin->encode_snap_blob(snapbl); } - - if (unlinking) { - ceph_assert(!straydn); - dn->replica_unlinking_ref = 0; - } for (set::iterator it = replicas.begin(); it != replicas.end(); ++it) { @@ -11257,21 +11245,12 @@ void MDCache::send_dentry_unlink(CDentry *dn, CDentry *straydn, rejoin_gather.count(*it))) continue; - auto unlink = make_message(dn->get_dir()->dirfrag(), - dn->get_name(), unlinking); + auto unlink = make_message(dn->get_dir()->dirfrag(), dn->get_name()); if (straydn) { encode_replica_stray(straydn, *it, unlink->straybl); unlink->snapbl = snapbl; } mds->send_message_mds(unlink, *it); - if (unlinking) { - dn->replica_unlinking_ref++; - dn->get(CDentry::PIN_WAITUNLINKSTATE); - } - } - - if (unlinking && dn->replica_unlinking_ref) { - dn->add_waiter(CDentry::WAIT_UNLINK_STATE, new C_MDS_RetryRequest(this, mdr)); } } @@ -11280,40 +11259,23 @@ void MDCache::handle_dentry_unlink(const cref_t &m) // straydn CDentry *straydn = nullptr; CInode *strayin = nullptr; - if (m->straybl.length()) decode_replica_stray(straydn, &strayin, m->straybl, mds_rank_t(m->get_source().num())); - boost::intrusive_ptr ack; - CDentry::linkage_t *dnl; - CDentry *dn; - CInode *in; - bool hadrealm; - CDir 
*dir = get_dirfrag(m->get_dirfrag()); if (!dir) { dout(7) << __func__ << " don't have dirfrag " << m->get_dirfrag() << dendl; - if (m->is_unlinking()) - goto ack; } else { - dn = dir->lookup(m->get_dn()); + CDentry *dn = dir->lookup(m->get_dn()); if (!dn) { dout(7) << __func__ << " don't have dentry " << *dir << " dn " << m->get_dn() << dendl; - if (m->is_unlinking()) - goto ack; } else { dout(7) << __func__ << " on " << *dn << dendl; - - if (m->is_unlinking()) { - dn->state_set(CDentry::STATE_UNLINKING); - goto ack; - } - - dnl = dn->get_linkage(); + CDentry::linkage_t *dnl = dn->get_linkage(); // open inode? if (dnl->is_primary()) { - in = dnl->get_inode(); + CInode *in = dnl->get_inode(); dn->dir->unlink_inode(dn); ceph_assert(straydn); straydn->dir->link_primary_inode(straydn, in); @@ -11324,12 +11286,11 @@ void MDCache::handle_dentry_unlink(const cref_t &m) in->first = straydn->first; // update subtree map? - if (in->is_dir()) { + if (in->is_dir()) adjust_subtree_after_rename(in, dir, false); - } if (m->snapbl.length()) { - hadrealm = (in->snaprealm ? true : false); + bool hadrealm = (in->snaprealm ? true : false); in->decode_snap_blob(m->snapbl); ceph_assert(in->snaprealm); if (!hadrealm) @@ -11340,7 +11301,7 @@ void MDCache::handle_dentry_unlink(const cref_t &m) if (in->is_any_caps() && !in->state_test(CInode::STATE_EXPORTINGCAPS)) migrator->export_caps(in); - + straydn = NULL; } else { ceph_assert(!straydn); @@ -11348,12 +11309,6 @@ void MDCache::handle_dentry_unlink(const cref_t &m) dn->dir->unlink_inode(dn); } ceph_assert(dnl->is_null()); - dn->state_clear(CDentry::STATE_UNLINKING); - - MDSContext::vec finished; - dn->take_waiting(CDentry::WAIT_UNLINK_FINISH, finished); - mds->queue_waiters(finished); - } } @@ -11365,36 +11320,8 @@ void MDCache::handle_dentry_unlink(const cref_t &m) trim_dentry(straydn, ex); send_expire_messages(ex); } - return; - -ack: - ack = make_message(m->get_dirfrag(), m->get_dn()); - mds->send_message(ack, m->get_connection()); } -void MDCache::handle_dentry_unlink_ack(const cref_t &m) -{ - CDir *dir = get_dirfrag(m->get_dirfrag()); - if (!dir) { - dout(7) << __func__ << " don't have dirfrag " << m->get_dirfrag() << dendl; - } else { - CDentry *dn = dir->lookup(m->get_dn()); - if (!dn) { - dout(7) << __func__ << " don't have dentry " << *dir << " dn " << m->get_dn() << dendl; - } else { - dout(7) << __func__ << " on " << *dn << " ref " - << dn->replica_unlinking_ref << " -> " - << dn->replica_unlinking_ref - 1 << dendl; - dn->replica_unlinking_ref--; - if (!dn->replica_unlinking_ref) { - MDSContext::vec finished; - dn->take_waiting(CDentry::WAIT_UNLINK_STATE, finished); - mds->queue_waiters(finished); - } - dn->put(CDentry::PIN_WAITUNLINKSTATE); - } - } -} @@ -13010,7 +12937,7 @@ void MDCache::enqueue_scrub( std::string_view path, std::string_view tag, bool force, bool recursive, bool repair, - Formatter *f, Context *fin) + bool scrub_mdsdir, Formatter *f, Context *fin) { dout(10) << __func__ << " " << path << dendl; @@ -13044,7 +12971,8 @@ void MDCache::enqueue_scrub( } C_MDS_EnqueueScrub *cs = new C_MDS_EnqueueScrub(tag_str, f, fin); - cs->header = std::make_shared(tag_str, is_internal, force, recursive, repair); + cs->header = std::make_shared(tag_str, is_internal, force, + recursive, repair, scrub_mdsdir); mdr->internal_op_finish = cs; enqueue_scrub_work(mdr); diff --git a/ceph/src/mds/MDCache.h b/ceph/src/mds/MDCache.h index a99bed72a..d9f173038 100644 --- a/ceph/src/mds/MDCache.h +++ b/ceph/src/mds/MDCache.h @@ -913,7 +913,7 @@ class MDCache { void 
encode_remote_dentry_link(CDentry::linkage_t *dnl, bufferlist& bl); void decode_remote_dentry_link(CDir *dir, CDentry *dn, bufferlist::const_iterator& p); void send_dentry_link(CDentry *dn, MDRequestRef& mdr); - void send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr, bool unlinking=false); + void send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr); void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSContext *c) { uncommitted_fragments.at(dirfrag).waiters.push_back(c); @@ -975,7 +975,7 @@ class MDCache { */ void enqueue_scrub(std::string_view path, std::string_view tag, bool force, bool recursive, bool repair, - Formatter *f, Context *fin); + bool scrub_mdsdir, Formatter *f, Context *fin); void repair_inode_stats(CInode *diri); void repair_dirfrag_stats(CDir *dir); void rdlock_dirfrags_stats(CInode *diri, MDSInternalContext *fin); @@ -1156,7 +1156,6 @@ class MDCache { void handle_discover_reply(const cref_t &m); void handle_dentry_link(const cref_t &m); void handle_dentry_unlink(const cref_t &m); - void handle_dentry_unlink_ack(const cref_t &m); int dump_cache(std::string_view fn, Formatter *f, double timeout); diff --git a/ceph/src/mds/MDLog.cc b/ceph/src/mds/MDLog.cc index 337c1025a..82899d2da 100644 --- a/ceph/src/mds/MDLog.cc +++ b/ceph/src/mds/MDLog.cc @@ -977,8 +977,14 @@ void MDLog::_recovery_thread(MDSContext *completion) inodeno_t const default_log_ino = MDS_INO_LOG_OFFSET + mds->get_nodeid(); jp.front = default_log_ino; int write_result = jp.save(mds->objecter); - // Nothing graceful we can do for this - ceph_assert(write_result >= 0); + if (write_result < 0) { + std::lock_guard l(mds->mds_lock); + if (mds->is_daemon_stopping()) { + return; + } + mds->damaged(); + ceph_abort(); // damaged should never return + } } else if (read_result == -CEPHFS_EBLOCKLISTED) { derr << "Blocklisted during JournalPointer read! Respawning..." << dendl; mds->respawn(); @@ -1420,6 +1426,7 @@ void MDLog::_replay_thread() le->_segment->num_events++; le->_segment->end = journaler->get_read_pos(); num_events++; + logger->set(l_mdl_ev, num_events); { std::lock_guard l(mds->mds_lock); @@ -1432,6 +1439,8 @@ void MDLog::_replay_thread() } logger->set(l_mdl_rdpos, pos); + logger->set(l_mdl_expos, journaler->get_expire_pos()); + logger->set(l_mdl_wrpos, journaler->get_write_pos()); } // done! 
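// Editorial note (not part of the upstream patch): two MDLog behaviour
// changes land here. _recovery_thread() no longer asserts when the
// JournalPointer cannot be persisted; it takes mds_lock, returns early if the
// daemon is already stopping, and otherwise marks the rank damaged. The
// replay thread also keeps the l_mdl_ev, l_mdl_expos and l_mdl_wrpos perf
// counters updated as it advances, instead of only the read position.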
@@ -1485,6 +1494,9 @@ void MDLog::standby_trim_segments() dout(10) << " removing segment" << dendl; mds->mdcache->standby_trim_segment(seg); remove_oldest_segment(); + if (pre_segments_size > 0) { + --pre_segments_size; + } removed_segment = true; } diff --git a/ceph/src/mds/MDSAuthCaps.cc b/ceph/src/mds/MDSAuthCaps.cc index f8158be68..d983f2d58 100644 --- a/ceph/src/mds/MDSAuthCaps.cc +++ b/ceph/src/mds/MDSAuthCaps.cc @@ -227,15 +227,14 @@ bool MDSAuthCaps::is_capable(std::string_view inode_path, uid_t new_uid, gid_t new_gid, const entity_addr_t& addr) const { - if (cct) - ldout(cct, 10) << __func__ << " inode(path /" << inode_path - << " owner " << inode_uid << ":" << inode_gid - << " mode 0" << std::oct << inode_mode << std::dec - << ") by caller " << caller_uid << ":" << caller_gid + ldout(g_ceph_context, 10) << __func__ << " inode(path /" << inode_path + << " owner " << inode_uid << ":" << inode_gid + << " mode 0" << std::oct << inode_mode << std::dec + << ") by caller " << caller_uid << ":" << caller_gid // << "[" << caller_gid_list << "]"; - << " mask " << mask - << " new " << new_uid << ":" << new_gid - << " cap: " << *this << dendl; + << " mask " << mask + << " new " << new_uid << ":" << new_gid + << " cap: " << *this << dendl; for (const auto& grant : grants) { if (grant.network.size() && @@ -339,7 +338,7 @@ void MDSAuthCaps::set_allow_all() {})); } -bool MDSAuthCaps::parse(CephContext *c, std::string_view str, ostream *err) +bool MDSAuthCaps::parse(std::string_view str, ostream *err) { // Special case for legacy caps if (str == "allow") { @@ -354,7 +353,6 @@ bool MDSAuthCaps::parse(CephContext *c, std::string_view str, ostream *err) MDSCapParser g; bool r = qi::phrase_parse(iter, end, g, ascii::space, *this); - cct = c; // set after parser self-assignment if (r && iter == end) { for (auto& grant : grants) { std::sort(grant.match.gids.begin(), grant.match.gids.end()); diff --git a/ceph/src/mds/MDSAuthCaps.h b/ceph/src/mds/MDSAuthCaps.h index 395c921fd..5fcbb1f2f 100644 --- a/ceph/src/mds/MDSAuthCaps.h +++ b/ceph/src/mds/MDSAuthCaps.h @@ -183,9 +183,8 @@ class MDSAuthCaps { public: MDSAuthCaps() = default; - explicit MDSAuthCaps(CephContext *cct_) : cct(cct_) {} - // this ctor is used by spirit/phoenix; doesn't need cct. 
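// Illustrative aside (not part of the upstream patch): after this change a
// CephContext is no longer threaded through cap parsing, and is_capable()
// logs via g_ceph_context instead. A minimal caller-side sketch under that
// assumption, using an ordinary MDS cap string:
//
//   MDSAuthCaps caps;
//   std::ostringstream err;
//   if (!caps.parse("allow rw path=/volumes", &err)) {
//     std::cerr << "bad cap string: " << err.str() << std::endl;
//   }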
+ // this ctor is used by spirit/phoenix explicit MDSAuthCaps(const std::vector& grants_) : grants(grants_) {} void clear() { @@ -193,7 +192,7 @@ public: } void set_allow_all(); - bool parse(CephContext *cct, std::string_view str, std::ostream *err); + bool parse(std::string_view str, std::ostream *err); bool allow_all() const; bool is_capable(std::string_view inode_path, @@ -226,7 +225,6 @@ public: friend std::ostream &operator<<(std::ostream &out, const MDSAuthCaps &cap); private: - CephContext *cct = nullptr; std::vector grants; }; diff --git a/ceph/src/mds/MDSCacheObject.h b/ceph/src/mds/MDSCacheObject.h index 53d33460b..8710102b7 100644 --- a/ceph/src/mds/MDSCacheObject.h +++ b/ceph/src/mds/MDSCacheObject.h @@ -98,8 +98,8 @@ class MDSCacheObject { std::string_view generic_pin_name(int p) const; // printing - virtual void print(std::ostream& out) = 0; - virtual std::ostream& print_db_line_prefix(std::ostream& out) { + virtual void print(std::ostream& out) const = 0; + virtual std::ostream& print_db_line_prefix(std::ostream& out) const { return out << "mdscacheobject(" << this << ") "; } @@ -326,11 +326,7 @@ class MDSCacheObject { static uint64_t last_wait_seq; }; -std::ostream& operator<<(std::ostream& out, const mdsco_db_line_prefix& o); -// printer -std::ostream& operator<<(std::ostream& out, const MDSCacheObject &o); - -inline std::ostream& operator<<(std::ostream& out, MDSCacheObject &o) { +inline std::ostream& operator<<(std::ostream& out, const MDSCacheObject& o) { o.print(out); return out; } diff --git a/ceph/src/mds/MDSDaemon.cc b/ceph/src/mds/MDSDaemon.cc index ca8c3b656..d45acce06 100644 --- a/ceph/src/mds/MDSDaemon.cc +++ b/ceph/src/mds/MDSDaemon.cc @@ -157,6 +157,7 @@ void MDSDaemon::asok_command( // our response before seeing us disappear from mdsmap sleep(1); std::lock_guard l(mds_lock); + derr << "Exiting due to admin socket command" << dendl; suicide(); }); t.detach(); @@ -258,7 +259,9 @@ void MDSDaemon::set_up_admin_socket() r = admin_socket->register_command("dump_ops_in_flight", asok_hook, "show the ops currently in flight"); ceph_assert(r == 0); - r = admin_socket->register_command("ops", asok_hook, + r = admin_socket->register_command("ops " + "name=flags,type=CephChoices,strings=locks,n=N,req=false ", + asok_hook, "show the ops currently in flight"); ceph_assert(r == 0); r = admin_socket->register_command("dump_blocked_ops", @@ -286,7 +289,7 @@ void MDSDaemon::set_up_admin_socket() ceph_assert(r == 0); r = admin_socket->register_command("scrub start " "name=path,type=CephString " - "name=scrubops,type=CephChoices,strings=force|recursive|repair,n=N,req=false " + "name=scrubops,type=CephChoices,strings=force|recursive|repair|scrub_mdsdir,n=N,req=false " "name=tag,type=CephString,req=false", asok_hook, "scrub and inode and output results"); @@ -376,10 +379,6 @@ void MDSDaemon::set_up_admin_socket() asok_hook, "Evict a client session by id"); ceph_assert(r == 0); - r = admin_socket->register_command("session ls name=cap_dump,type=CephBool,req=false", - asok_hook, - "Enumerate connected CephFS clients"); - ceph_assert(r == 0); r = admin_socket->register_command("session config " "name=client_id,type=CephInt,req=true " "name=option,type=CephString,req=true " @@ -1074,7 +1073,7 @@ bool MDSDaemon::parse_caps(const AuthCapsInfo& info, MDSAuthCaps& caps) dout(10) << __func__ << ": parsing auth_cap_str='" << auth_cap_str << "'" << dendl; CachedStackStringStream cs; - if (caps.parse(g_ceph_context, auth_cap_str, cs.get())) { + if (caps.parse(auth_cap_str, cs.get())) { return 
true; } else { dout(1) << __func__ << ": auth cap parse error: " << cs->strv() << " parsing '" << auth_cap_str << "'" << dendl; @@ -1083,7 +1082,7 @@ bool MDSDaemon::parse_caps(const AuthCapsInfo& info, MDSAuthCaps& caps) } } -int MDSDaemon::ms_handle_authentication(Connection *con) +int MDSDaemon::ms_handle_fast_authentication(Connection *con) { /* N.B. without mds_lock! */ MDSAuthCaps caps; diff --git a/ceph/src/mds/MDSDaemon.h b/ceph/src/mds/MDSDaemon.h index 1fe872ba9..e7929d2c3 100644 --- a/ceph/src/mds/MDSDaemon.h +++ b/ceph/src/mds/MDSDaemon.h @@ -146,7 +146,7 @@ class MDSDaemon : public Dispatcher { private: bool ms_dispatch2(const ref_t &m) override; - int ms_handle_authentication(Connection *con) override; + int ms_handle_fast_authentication(Connection *con) override; void ms_handle_accept(Connection *con) override; void ms_handle_connect(Connection *con) override; bool ms_handle_reset(Connection *con) override; diff --git a/ceph/src/mds/MDSRank.cc b/ceph/src/mds/MDSRank.cc index a3191cb80..9a80534a4 100644 --- a/ceph/src/mds/MDSRank.cc +++ b/ceph/src/mds/MDSRank.cc @@ -38,6 +38,7 @@ #include "mon/MonClient.h" #include "common/HeartbeatMap.h" #include "ScrubStack.h" +#include "Mutation.h" #include "MDSRank.h" @@ -931,6 +932,12 @@ void MDSRank::respawn() } } +void MDSRank::abort(std::string_view msg) +{ + monc->flush_log(); + ceph_abort(msg); +} + void MDSRank::damaged() { ceph_assert(whoami != MDS_RANK_NONE); @@ -1178,7 +1185,6 @@ bool MDSRank::is_valid_message(const cref_t &m) { type == CEPH_MSG_CLIENT_RECONNECT || type == CEPH_MSG_CLIENT_RECLAIM || type == CEPH_MSG_CLIENT_REQUEST || - type == CEPH_MSG_CLIENT_REPLY || type == MSG_MDS_PEER_REQUEST || type == MSG_MDS_HEARTBEAT || type == MSG_MDS_TABLE_REQUEST || @@ -1232,7 +1238,6 @@ void MDSRank::handle_message(const cref_t &m) ALLOW_MESSAGES_FROM(CEPH_ENTITY_TYPE_CLIENT); // fall-thru case CEPH_MSG_CLIENT_REQUEST: - case CEPH_MSG_CLIENT_REPLY: server->dispatch(m); break; case MSG_MDS_PEER_REQUEST: @@ -1470,10 +1475,12 @@ void MDSRank::send_message_mds(const ref_t& m, const entity_addrvec_t & messenger->send_to_mds(ref_t(m).detach(), addr); } -void MDSRank::forward_message_mds(const cref_t& m, mds_rank_t mds) +void MDSRank::forward_message_mds(MDRequestRef& mdr, mds_rank_t mds) { ceph_assert(mds != whoami); + auto m = mdr->release_client_request(); + /* * don't actually forward if non-idempotent! * client has to do it. 
although the MDS will ignore duplicate requests, @@ -1485,6 +1492,10 @@ void MDSRank::forward_message_mds(const cref_t& m, mds_rank_t md // tell the client where it should go auto session = get_session(m); + if (!session) { + dout(1) << "no session found, failed to forward client request " << mdr << dendl; + return; + } auto f = make_message(m->get_tid(), mds, m->get_num_fwd()+1, client_must_resend); send_message_client(f, session); } @@ -2604,11 +2615,31 @@ void MDSRankDispatcher::handle_asok_command( int r = 0; CachedStackStringStream css; bufferlist outbl; - if (command == "dump_ops_in_flight" || - command == "ops") { + dout(10) << __func__ << ": " << command << dendl; + if (command == "dump_ops_in_flight") { if (!op_tracker.dump_ops_in_flight(f)) { *css << "op_tracker disabled; set mds_enable_op_tracker=true to enable"; } + } else if (command == "ops") { + vector flags; + cmd_getval(cmdmap, "flags", flags); + std::unique_lock l(mds_lock, std::defer_lock); + auto lambda = OpTracker::default_dumper; + if (flags.size()) { + /* use std::function if we actually want to capture flags someday */ + lambda = [](const TrackedOp& op, Formatter* f) { + auto* req = dynamic_cast(&op); + if (req) { + req->dump_with_mds_lock(f); + } else { + op.dump_type(f); + } + }; + l.lock(); + } + if (!op_tracker.dump_ops_in_flight(f, false, {""}, false, lambda)) { + *css << "op_tracker disabled; set mds_enable_op_tracker=true to enable"; + } } else if (command == "dump_blocked_ops") { if (!op_tracker.dump_ops_in_flight(f, true)) { *css << "op_tracker disabled; set mds_enable_op_tracker=true to enable"; @@ -2973,6 +3004,7 @@ void MDSRank::command_scrub_start(Formatter *f, bool force = false; bool recursive = false; bool repair = false; + bool scrub_mdsdir = false; for (auto &op : scrubop_vec) { if (op == "force") force = true; @@ -2980,10 +3012,13 @@ void MDSRank::command_scrub_start(Formatter *f, recursive = true; else if (op == "repair") repair = true; + else if (op == "scrub_mdsdir" && path == "/") + scrub_mdsdir = true; } std::lock_guard l(mds_lock); - mdcache->enqueue_scrub(path, tag, force, recursive, repair, f, on_finish); + mdcache->enqueue_scrub(path, tag, force, recursive, repair, scrub_mdsdir, + f, on_finish); // scrub_dentry() finishers will dump the data for us; we're done! 
} @@ -2993,7 +3028,7 @@ void MDSRank::command_tag_path(Formatter *f, C_SaferCond scond; { std::lock_guard l(mds_lock); - mdcache->enqueue_scrub(path, tag, true, true, false, f, &scond); + mdcache->enqueue_scrub(path, tag, true, true, false, false, f, &scond); } scond.wait(); } @@ -3785,6 +3820,7 @@ const char** MDSRankDispatcher::get_tracked_conf_keys() const "mds_extraordinary_events_dump_interval", "mds_inject_rename_corrupt_dentry_first", "mds_inject_journal_corrupt_dentry_first", + "mds_session_metadata_threshold", NULL }; return KEYS; diff --git a/ceph/src/mds/MDSRank.h b/ceph/src/mds/MDSRank.h index c52c83708..b61fc178c 100644 --- a/ceph/src/mds/MDSRank.h +++ b/ceph/src/mds/MDSRank.h @@ -151,29 +151,6 @@ class Finisher; class ScrubStack; class C_ExecAndReply; -struct MDSMetaRequest { -private: - int _op; - CDentry *_dentry; - ceph_tid_t _tid; -public: - explicit MDSMetaRequest(int op, CDentry *dn, ceph_tid_t tid) : - _op(op), _dentry(dn), _tid(tid) { - if (_dentry) { - _dentry->get(CDentry::PIN_PURGING); - } - } - ~MDSMetaRequest() { - if (_dentry) { - _dentry->put(CDentry::PIN_PURGING); - } - } - - CDentry *get_dentry() { return _dentry; } - int get_op() { return _op; } - ceph_tid_t get_tid() { return _tid; } -}; - /** * The public part of this class's interface is what's exposed to all * the various subsystems (server, mdcache, etc), such as pointers @@ -295,6 +272,13 @@ class MDSRank { return count * _heartbeat_reset_grace; } + /** + * Abort the MDS and flush any clog messages. + * + * Callers must already hold mds_lock. + */ + void abort(std::string_view msg); + /** * Report state DAMAGED to the mon, and then pass on to respawn(). Call * this when an unrecoverable error is encountered while attempting @@ -322,7 +306,7 @@ class MDSRank { void send_message_mds(const ref_t& m, mds_rank_t mds); void send_message_mds(const ref_t& m, const entity_addrvec_t &addr); - void forward_message_mds(const cref_t& req, mds_rank_t mds); + void forward_message_mds(MDRequestRef& mdr, mds_rank_t mds); void send_message_client_counted(const ref_t& m, client_t client); void send_message_client_counted(const ref_t& m, Session* session); void send_message_client_counted(const ref_t& m, const ConnectionRef& connection); @@ -439,8 +423,6 @@ class MDSRank { PerfCounters *logger = nullptr, *mlogger = nullptr; OpTracker op_tracker; - std::map internal_client_requests; - // The last different state I held before current MDSMap::DaemonState last_state = MDSMap::STATE_BOOT; // The state assigned to me by the MDSMap diff --git a/ceph/src/mds/Migrator.cc b/ceph/src/mds/Migrator.cc index 130ed08c1..8bd875c34 100644 --- a/ceph/src/mds/Migrator.cc +++ b/ceph/src/mds/Migrator.cc @@ -3379,8 +3379,6 @@ void Migrator::decode_import_dir(bufferlist::const_iterator& blp, if (le) le->metablob.add_import_dir(dir); - int num_imported = 0; - // take all waiters on this dir // NOTE: a pass of imported data is guaranteed to get all of my waiters because // a replica's presense in my cache implies/forces it's presense in authority's. 
diff --git a/ceph/src/mds/Mutation.cc b/ceph/src/mds/Mutation.cc index 39eee4721..b52e04a63 100644 --- a/ceph/src/mds/Mutation.cc +++ b/ceph/src/mds/Mutation.cc @@ -268,7 +268,7 @@ void MutationImpl::cleanup() drop_pins(); } -void MutationImpl::_dump_op_descriptor_unlocked(ostream& stream) const +void MutationImpl::_dump_op_descriptor(ostream& stream) const { stream << "Mutation"; } @@ -446,22 +446,17 @@ int MDRequestImpl::compare_paths() cref_t MDRequestImpl::release_client_request() { - msg_lock.lock(); + std::lock_guard l(lock); cref_t req; req.swap(client_request); client_request = req; - msg_lock.unlock(); return req; } void MDRequestImpl::reset_peer_request(const cref_t& req) { - msg_lock.lock(); - cref_t old; - old.swap(peer_request); + std::lock_guard l(lock); peer_request = req; - msg_lock.unlock(); - old.reset(); } void MDRequestImpl::print(ostream &out) const @@ -474,88 +469,89 @@ void MDRequestImpl::print(ostream &out) const out << ")"; } -void MDRequestImpl::dump(Formatter *f) const -{ - _dump(f); -} - -void MDRequestImpl::_dump(Formatter *f) const -{ - f->dump_string("flag_point", state_string()); - f->dump_stream("reqid") << reqid; - { - msg_lock.lock(); - auto _client_request = client_request; - auto _peer_request =peer_request; - msg_lock.unlock(); - - if (_client_request) { - f->dump_string("op_type", "client_request"); - f->open_object_section("client_info"); - f->dump_stream("client") << _client_request->get_orig_source(); - f->dump_int("tid", _client_request->get_tid()); - f->close_section(); // client_info - } else if (is_peer()) { // replies go to an existing mdr - f->dump_string("op_type", "peer_request"); - f->open_object_section("leader_info"); - f->dump_stream("leader") << peer_to_mds; - f->close_section(); // leader_info - - if (_peer_request) { - f->open_object_section("request_info"); - f->dump_int("attempt", _peer_request->get_attempt()); - f->dump_string("op_type", - MMDSPeerRequest::get_opname(_peer_request->get_op())); - f->dump_int("lock_type", _peer_request->get_lock_type()); - f->dump_stream("object_info") << _peer_request->get_object_info(); - f->dump_stream("srcdnpath") << _peer_request->srcdnpath; - f->dump_stream("destdnpath") << _peer_request->destdnpath; - f->dump_stream("witnesses") << _peer_request->witnesses; - f->dump_bool("has_inode_export", - _peer_request->inode_export_v != 0); - f->dump_int("inode_export_v", _peer_request->inode_export_v); - f->dump_stream("op_stamp") << _peer_request->op_stamp; - f->close_section(); // request_info - } - } - else if (internal_op != -1) { // internal request - f->dump_string("op_type", "internal_op"); - f->dump_int("internal_op", internal_op); - f->dump_string("op_name", ceph_mds_op_name(internal_op)); - } - else { - f->dump_string("op_type", "no_available_op_found"); +void MDRequestImpl::_dump(Formatter *f, bool has_mds_lock) const +{ + std::lock_guard l(lock); + f->dump_string("flag_point", _get_state_string()); + f->dump_object("reqid", reqid); + if (client_request) { + f->dump_string("op_type", "client_request"); + } else if (is_peer()) { // replies go to an existing mdr + f->dump_string("op_type", "peer_request"); + f->open_object_section("leader_info"); + f->dump_stream("leader") << peer_to_mds; + f->close_section(); // leader_info + + if (peer_request) { + f->open_object_section("request_info"); + f->dump_int("attempt", peer_request->get_attempt()); + f->dump_string("op_type", + MMDSPeerRequest::get_opname(peer_request->get_op())); + f->dump_int("lock_type", peer_request->get_lock_type()); + 
f->dump_stream("object_info") << peer_request->get_object_info(); + f->dump_stream("srcdnpath") << peer_request->srcdnpath; + f->dump_stream("destdnpath") << peer_request->destdnpath; + f->dump_stream("witnesses") << peer_request->witnesses; + f->dump_bool("has_inode_export", + peer_request->inode_export_v != 0); + f->dump_int("inode_export_v", peer_request->inode_export_v); + f->dump_stream("op_stamp") << peer_request->op_stamp; + f->close_section(); // request_info } } + else if (internal_op != -1) { // internal request + f->dump_string("op_type", "internal_op"); + f->dump_int("internal_op", internal_op); + f->dump_string("op_name", ceph_mds_op_name(internal_op)); + } + else { + f->dump_string("op_type", "no_available_op_found"); + } + { f->open_array_section("events"); - std::lock_guard l(lock); for (auto& i : events) { f->dump_object("event", i); } f->close_section(); // events } + + if (has_mds_lock) { + f->open_array_section("locks"); + for (auto& l : locks) { + f->open_object_section("lock"); + { + auto* mdsco = l.lock->get_parent(); + f->dump_object("object", *mdsco); + CachedStackStringStream css; + *css << *mdsco; + f->dump_string("object_string", css->strv()); + f->dump_object("lock", *l.lock); + f->dump_int("flags", l.flags); + f->dump_int("wrlock_target", l.wrlock_target); + } + f->close_section(); + } + f->close_section(); + } else { + f->dump_null("locks"); + } } -void MDRequestImpl::_dump_op_descriptor_unlocked(ostream& stream) const +void MDRequestImpl::_dump_op_descriptor(ostream& os) const { - msg_lock.lock(); - auto _client_request = client_request; - auto _peer_request = peer_request; - msg_lock.unlock(); - - if (_client_request) { - _client_request->print(stream); - } else if (_peer_request) { - _peer_request->print(stream); + if (client_request) { + client_request->print(os); + } else if (peer_request) { + peer_request->print(os); } else if (is_peer()) { - stream << "peer_request:" << reqid; + os << "peer_request:" << reqid; } else if (internal_op >= 0) { - stream << "internal op " << ceph_mds_op_name(internal_op) << ":" << reqid; + os << "internal op " << ceph_mds_op_name(internal_op) << ":" << reqid; } else { // drat, it's triggered by a peer request, but we don't have a message // FIXME - stream << "rejoin:" << reqid; + os << "rejoin:" << reqid; } } diff --git a/ceph/src/mds/Mutation.h b/ceph/src/mds/Mutation.h index 09ebe7052..b963dee08 100644 --- a/ceph/src/mds/Mutation.h +++ b/ceph/src/mds/Mutation.h @@ -221,7 +221,7 @@ public: } virtual void dump(ceph::Formatter *f) const {} - void _dump_op_descriptor_unlocked(std::ostream& stream) const override; + void _dump_op_descriptor(std::ostream& stream) const override; metareqid_t reqid; __u32 attempt = 0; // which attempt for this request @@ -396,7 +396,9 @@ struct MDRequestImpl : public MutationImpl { std::unique_ptr release_batch_op(); void print(std::ostream &out) const override; - void dump(ceph::Formatter *f) const override; + void dump_with_mds_lock(ceph::Formatter* f) const { + return _dump(f, true); + } ceph::cref_t release_client_request(); void reset_peer_request(const ceph::cref_t& req=nullptr); @@ -416,6 +418,7 @@ struct MDRequestImpl : public MutationImpl { CInode *in[2] = {}; CDentry *straydn = nullptr; snapid_t snapid = CEPH_NOSNAP; + snapid_t snapid_diff_other = CEPH_NOSNAP; CInode *tracei = nullptr; CDentry *tracedn = nullptr; @@ -452,10 +455,11 @@ struct MDRequestImpl : public MutationImpl { bool waited_for_osdmap = false; protected: - void _dump(ceph::Formatter *f) const override; - void 
_dump_op_descriptor_unlocked(std::ostream& stream) const override; -private: - mutable ceph::spinlock msg_lock; + void _dump(ceph::Formatter *f) const override { + _dump(f, false); + } + void _dump(ceph::Formatter *f, bool has_mds_lock) const; + void _dump_op_descriptor(std::ostream& stream) const override; }; struct MDPeerUpdate { diff --git a/ceph/src/mds/ScrubHeader.h b/ceph/src/mds/ScrubHeader.h index eb79090b0..a5d35f61c 100644 --- a/ceph/src/mds/ScrubHeader.h +++ b/ceph/src/mds/ScrubHeader.h @@ -35,9 +35,9 @@ class CInode; class ScrubHeader { public: ScrubHeader(std::string_view tag_, bool is_tag_internal_, bool force_, - bool recursive_, bool repair_) + bool recursive_, bool repair_, bool scrub_mdsdir_ = false) : tag(tag_), is_tag_internal(is_tag_internal_), force(force_), - recursive(recursive_), repair(repair_) {} + recursive(recursive_), repair(repair_), scrub_mdsdir(scrub_mdsdir_) {} // Set after construction because it won't be known until we've // started resolving path and locking @@ -46,6 +46,7 @@ public: bool get_recursive() const { return recursive; } bool get_repair() const { return repair; } bool get_force() const { return force; } + bool get_scrub_mdsdir() const { return scrub_mdsdir; } bool is_internal_tag() const { return is_tag_internal; } inodeno_t get_origin() const { return origin; } const std::string& get_tag() const { return tag; } @@ -69,6 +70,7 @@ protected: const bool force; const bool recursive; const bool repair; + const bool scrub_mdsdir; inodeno_t origin; bool repaired = false; // May be set during scrub if repairs happened diff --git a/ceph/src/mds/ScrubStack.cc b/ceph/src/mds/ScrubStack.cc index 5a3c2bf81..6d799343f 100644 --- a/ceph/src/mds/ScrubStack.cc +++ b/ceph/src/mds/ScrubStack.cc @@ -67,6 +67,11 @@ int ScrubStack::_enqueue(MDSCacheObject *obj, ScrubHeaderRef& header, bool top) dout(10) << __func__ << " with {" << *in << "}" << ", already in scrubbing" << dendl; return -CEPHFS_EBUSY; } + if(in->state_test(CInode::STATE_PURGING)) { + dout(10) << *obj << " is purging, skip pushing into scrub stack" << dendl; + // treating this as success since purge will make sure this inode goes away + return 0; + } dout(10) << __func__ << " with {" << *in << "}" << ", top=" << top << dendl; in->scrub_initialize(header); @@ -75,6 +80,11 @@ int ScrubStack::_enqueue(MDSCacheObject *obj, ScrubHeaderRef& header, bool top) dout(10) << __func__ << " with {" << *dir << "}" << ", already in scrubbing" << dendl; return -CEPHFS_EBUSY; } + if(dir->get_inode()->state_test(CInode::STATE_PURGING)) { + dout(10) << *obj << " is purging, skip pushing into scrub stack" << dendl; + // treating this as success since purge will make sure this dir inode goes away + return 0; + } dout(10) << __func__ << " with {" << *dir << "}" << ", top=" << top << dendl; // The edge directory must be in memory @@ -109,7 +119,20 @@ int ScrubStack::enqueue(CInode *in, ScrubHeaderRef& header, bool top) << ", conflicting tag " << header->get_tag() << dendl; return -CEPHFS_EEXIST; } - + if (header->get_scrub_mdsdir()) { + filepath fp; + mds_rank_t rank; + rank = mdcache->mds->get_nodeid(); + if(rank >= 0 && rank < MAX_MDS) { + fp.set_path("", MDS_INO_MDSDIR(rank)); + } + int r = _enqueue(mdcache->get_inode(fp.get_ino()), header, true); + if (r < 0) { + return r; + } + //to make sure mdsdir is always on the top + top = false; + } int r = _enqueue(in, header, top); if (r < 0) return r; @@ -673,6 +696,12 @@ void ScrubStack::scrub_status(Formatter *f) { } *optcss << "force"; } + if (header->get_scrub_mdsdir()) 
{ + if (have_more) { + *optcss << ","; + } + *optcss << "scrub_mdsdir"; + } f->dump_string("options", optcss->strv()); f->close_section(); // scrub id @@ -837,6 +866,18 @@ void ScrubStack::dispatch(const cref_t &m) } } +bool ScrubStack::remove_inode_if_stacked(CInode *in) { + MDSCacheObject *obj = dynamic_cast(in); + if(obj->item_scrub.is_on_list()) { + dout(20) << "removing inode " << *in << " from scrub_stack" << dendl; + obj->put(MDSCacheObject::PIN_SCRUBQUEUE); + obj->item_scrub.remove_myself(); + stack_size--; + return true; + } + return false; +} + void ScrubStack::handle_scrub(const cref_t &m) { diff --git a/ceph/src/mds/ScrubStack.h b/ceph/src/mds/ScrubStack.h index 62a4a5299..756ebd9cb 100644 --- a/ceph/src/mds/ScrubStack.h +++ b/ceph/src/mds/ScrubStack.h @@ -101,6 +101,8 @@ public: void dispatch(const cref_t &m); + bool remove_inode_if_stacked(CInode *in); + MDCache *mdcache; protected: diff --git a/ceph/src/mds/Server.cc b/ceph/src/mds/Server.cc index bf12cb7e2..ced4ecffa 100644 --- a/ceph/src/mds/Server.cc +++ b/ceph/src/mds/Server.cc @@ -31,7 +31,6 @@ #include "Mutation.h" #include "MetricsHandler.h" #include "cephfs_features.h" -#include "MDSContext.h" #include "msg/Messenger.h" @@ -118,7 +117,7 @@ public: } void _forward(mds_rank_t t) override { MDCache* mdcache = server->mdcache; - mdcache->mds->forward_message_mds(mdr->release_client_request(), t); + mdcache->mds->forward_message_mds(mdr, t); mdr->set_mds_stamp(ceph_clock_now()); for (auto& m : batch_reqs) { if (!m->killed) @@ -138,7 +137,7 @@ public: batch_reqs.clear(); server->reply_client_request(mdr, make_message(*mdr->client_request, r)); } - void print(std::ostream& o) { + void print(std::ostream& o) const override { o << "[batch front=" << *mdr << "]"; } }; @@ -242,6 +241,8 @@ void Server::create_logger() "Request type remove snapshot latency"); plb.add_time_avg(l_mdss_req_renamesnap_latency, "req_renamesnap_latency", "Request type rename snapshot latency"); + plb.add_time_avg(l_mdss_req_snapdiff_latency, "req_snapdiff_latency", + "Request type snapshot difference latency"); plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY); plb.add_u64_counter(l_mdss_dispatch_client_request, "dispatch_client_request", @@ -359,9 +360,6 @@ void Server::dispatch(const cref_t &m) case CEPH_MSG_CLIENT_REQUEST: handle_client_request(ref_cast(m)); return; - case CEPH_MSG_CLIENT_REPLY: - handle_client_reply(ref_cast(m)); - return; case CEPH_MSG_CLIENT_RECLAIM: handle_client_reclaim(ref_cast(m)); return; @@ -1982,23 +1980,20 @@ void Server::journal_and_reply(MDRequestRef& mdr, CInode *in, CDentry *dn, LogEv mdr->pin(dn); early_reply(mdr, in, dn); - + mdr->committing = true; submit_mdlog_entry(le, fin, mdr, __func__); - + if (mdr->client_request && mdr->client_request->is_queued_for_replay()) { if (mds->queue_one_replay()) { dout(10) << " queued next replay op" << dendl; } else { dout(10) << " journaled last replay op" << dendl; } - } else if (mdr->did_early_reply) { + } else if (mdr->did_early_reply) mds->locker->drop_rdlocks_for_early_reply(mdr.get()); - if (dn && dn->is_waiter_for(CDentry::WAIT_UNLINK_FINISH)) - mdlog->flush(); - } else { + else mdlog->flush(); - } } void Server::submit_mdlog_entry(LogEvent *le, MDSLogContextBase *fin, MDRequestRef& mdr, @@ -2125,6 +2120,9 @@ void Server::perf_gather_op_latency(const cref_t &req, utime_t l case CEPH_MDS_OP_RENAMESNAP: code = l_mdss_req_renamesnap_latency; break; + case CEPH_MDS_OP_READDIR_SNAPDIFF: + code = l_mdss_req_snapdiff_latency; + break; default: dout(1) << ": unknown 
client op" << dendl; return; @@ -2295,10 +2293,6 @@ void Server::reply_client_request(MDRequestRef& mdr, const ref_t & mds->send_message_client(reply, session); } - if (client_inst.name.is_mds() && reply->get_op() == CEPH_MDS_OP_RENAME) { - mds->send_message(reply, mdr->client_request->get_connection()); - } - if (req->is_queued_for_replay() && (mdr->has_completed || reply->get_result() < 0)) { if (reply->get_result() < 0) { @@ -2388,7 +2382,8 @@ void Server::set_trace_dist(const ref_t &reply, // inode if (in) { in->encode_inodestat(bl, session, NULL, snapid, 0, mdr->getattr_caps); - dout(20) << "set_trace_dist added in " << *in << dendl; + dout(20) << "set_trace_dist added snap " << snapid << " in " << *in + << dendl; reply->head.is_target = 1; } else reply->head.is_target = 0; @@ -2530,38 +2525,6 @@ void Server::handle_client_request(const cref_t &req) return; } -void Server::handle_client_reply(const cref_t &reply) -{ - dout(4) << "handle_client_reply " << *reply << dendl; - - ceph_assert(reply->is_safe()); - ceph_tid_t tid = reply->get_tid(); - - if (mds->internal_client_requests.count(tid) == 0) { - dout(1) << " no pending request on tid " << tid << dendl; - return; - } - - auto &req = mds->internal_client_requests.at(tid); - CDentry *dn = req.get_dentry(); - - switch (reply->get_op()) { - case CEPH_MDS_OP_RENAME: - if (dn) { - dn->state_clear(CDentry::STATE_REINTEGRATING); - - MDSContext::vec finished; - dn->take_waiting(CDentry::WAIT_REINTEGRATE_FINISH, finished); - mds->queue_waiters(finished); - } - break; - default: - dout(5) << " unknown client op " << reply->get_op() << dendl; - } - - mds->internal_client_requests.erase(tid); -} - void Server::handle_osd_map() { /* Note that we check the OSDMAP_FULL flag directly rather than @@ -2752,6 +2715,9 @@ void Server::dispatch_client_request(MDRequestRef& mdr) case CEPH_MDS_OP_RENAMESNAP: handle_client_renamesnap(mdr); break; + case CEPH_MDS_OP_READDIR_SNAPDIFF: + handle_client_readdir_snapdiff(mdr); + break; default: dout(1) << " unknown client op " << req->get_op() << dendl; @@ -3471,10 +3437,12 @@ CInode* Server::prepare_new_inode(MDRequestRef& mdr, CDir *dir, inodeno_t useino _inode->mode |= S_ISGID; } } else { - _inode->gid = mdr->client_request->get_caller_gid(); + _inode->gid = mdr->client_request->get_owner_gid(); + ceph_assert(_inode->gid != (unsigned)-1); } - _inode->uid = mdr->client_request->get_caller_uid(); + _inode->uid = mdr->client_request->get_owner_uid(); + ceph_assert(_inode->uid != (unsigned)-1); _inode->btime = _inode->ctime = _inode->mtime = _inode->atime = mdr->get_op_stamp(); @@ -4028,6 +3996,7 @@ void Server::handle_client_getattr(MDRequestRef& mdr, bool is_lookup) } else { dout(20) << __func__ << ": LOOKUP op, wait for previous same getattr ops to respond. " << *mdr << dendl; em.first->second->add_request(mdr); + mdr->mark_event("joining batch lookup"); return; } } else { @@ -4039,6 +4008,7 @@ void Server::handle_client_getattr(MDRequestRef& mdr, bool is_lookup) } else { dout(20) << __func__ << ": GETATTR op, wait for previous same getattr ops to respond. 
" << *mdr << dendl; em.first->second->add_request(mdr); + mdr->mark_event("joining batch getattr"); return; } } @@ -4082,6 +4052,24 @@ void Server::handle_client_getattr(MDRequestRef& mdr, bool is_lookup) } else if (ref->filelock.is_stable() || ref->filelock.get_num_wrlocks() > 0 || !ref->filelock.can_read(mdr->get_client())) { + /* Since we're taking advantage of an optimization here: + * + * We cannot suddenly, due to a changing condition, add this filelock as + * it can cause lock-order deadlocks. In this case, that condition is the + * lock state changes between request retries. If that happens, we need + * to check if we've acquired the other locks in this vector. If we have, + * then we need to drop those locks and retry. + */ + if (mdr->is_rdlocked(&ref->linklock) || + mdr->is_rdlocked(&ref->authlock) || + mdr->is_rdlocked(&ref->xattrlock)) { + /* start over */ + dout(20) << " dropping locks and restarting request because filelock state change" << dendl; + mds->locker->drop_locks(mdr.get()); + mdr->drop_local_auth_pins(); + mds->queue_waiter(new C_MDS_RetryRequest(mdcache, mdr)); + return; + } lov.add_rdlock(&ref->filelock); mdr->locking_state &= ~MutationImpl::ALL_LOCKED; } @@ -4403,6 +4391,7 @@ void Server::handle_client_open(MDRequestRef& mdr) } MutationImpl::LockOpVec lov; + lov.add_rdlock(&cur->snaplock); unsigned mask = req->head.args.open.mask; if (mask) { @@ -4564,21 +4553,11 @@ void Server::handle_client_openc(MDRequestRef& mdr) if (!dn) return; - if (is_unlink_pending(dn)) { - wait_for_pending_unlink(dn, mdr); - return; - } - CDentry::linkage_t *dnl = dn->get_projected_linkage(); if (!excl && !dnl->is_null()) { // it existed. ceph_assert(mdr.get()->is_rdlocked(&dn->lock)); - MutationImpl::LockOpVec lov; - lov.add_rdlock(&dnl->get_inode()->snaplock); - if (!mds->locker->acquire_locks(mdr, lov)) - return; - handle_client_open(mdr); return; } @@ -4732,6 +4711,47 @@ void Server::handle_client_openc(MDRequestRef& mdr) } +void Server::_finalize_readdir(MDRequestRef& mdr, + CInode *diri, + CDir* dir, + bool start, + bool end, + __u16 flags, + __u32 numfiles, + bufferlist& dirbl, + bufferlist& dnbl) +{ + const cref_t &req = mdr->client_request; + Session *session = mds->get_session(req); + + session->touch_readdir_cap(numfiles); + + if (end) { + flags |= CEPH_READDIR_FRAG_END; + if (start) + flags |= CEPH_READDIR_FRAG_COMPLETE; // FIXME: what purpose does this serve + } + + // finish final blob + encode(numfiles, dirbl); + encode(flags, dirbl); + dirbl.claim_append(dnbl); + + // yay, reply + dout(10) << "reply to " << *req << " readdir num=" << numfiles + << " bytes=" << dirbl.length() + << " start=" << (int)start + << " end=" << (int)end + << dendl; + mdr->reply_extra_bl = dirbl; + + // bump popularity. NOTE: this doesn't quite capture it. 
+ mds->balancer->hit_dir(dir, META_POP_READDIR, numfiles); + + // reply + mdr->tracei = diri; + respond_to_request(mdr, 0); +} void Server::handle_client_readdir(MDRequestRef& mdr) { @@ -4759,6 +4779,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr) if (logger) logger->inc(l_mdss_cap_acquisition_throttle); + mdr->mark_event("cap_acquisition_throttle"); mds->timer.add_event_after(caps_throttle_retry_request_timeout, new C_MDS_RetryRequest(mdcache, mdr)); return; } @@ -4937,7 +4958,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr) dout(10) << " ran out of room, stopping at " << dnbl.length() << " < " << bytes_left << dendl; break; } - + unsigned start_len = dnbl.length(); // dentry @@ -4946,7 +4967,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr) mds->locker->issue_client_lease(dn, in, mdr, now, dnbl); // inode - dout(12) << "including inode " << *in << dendl; + dout(12) << "including inode in " << *in << " snap " << snapid << dendl; int r = in->encode_inodestat(dnbl, mdr->session, realm, snapid, bytes_left - (int)dnbl.length()); if (r < 0) { // chop off dn->name, lease @@ -4962,39 +4983,12 @@ void Server::handle_client_readdir(MDRequestRef& mdr) // touch dn mdcache->lru.lru_touch(dn); } - - session->touch_readdir_cap(numfiles); - __u16 flags = 0; - if (end) { - flags = CEPH_READDIR_FRAG_END; - if (start) - flags |= CEPH_READDIR_FRAG_COMPLETE; // FIXME: what purpose does this serve - } // client only understand END and COMPLETE flags ? if (req_flags & CEPH_READDIR_REPLY_BITFLAGS) { flags |= CEPH_READDIR_HASH_ORDER | CEPH_READDIR_OFFSET_HASH; } - - // finish final blob - encode(numfiles, dirbl); - encode(flags, dirbl); - dirbl.claim_append(dnbl); - - // yay, reply - dout(10) << "reply to " << *req << " readdir num=" << numfiles - << " bytes=" << dirbl.length() - << " start=" << (int)start - << " end=" << (int)end - << dendl; - mdr->reply_extra_bl = dirbl; - - // bump popularity. NOTE: this doesn't quite capture it. 
- mds->balancer->hit_dir(dir, META_POP_READDIR, numfiles); - - // reply - mdr->tracei = diri; - respond_to_request(mdr, 0); + _finalize_readdir(mdr, diri, dir, start, end, flags, numfiles, dirbl, dnbl); } @@ -6820,84 +6814,6 @@ void Server::handle_client_getvxattr(MDRequestRef& mdr) // ------------------------------------------------ -struct C_WaitUnlinkToFinish : public MDSContext { -protected: - MDCache *mdcache; - CDentry *dn; - MDSContext *fin; - - MDSRank *get_mds() override - { - ceph_assert(mdcache != NULL); - return mdcache->mds; - } - -public: - C_WaitUnlinkToFinish(MDCache *m, CDentry *d, MDSContext *f) : - mdcache(m), dn(d), fin(f) {} - void finish(int r) override { - fin->complete(r); - dn->put(CDentry::PIN_PURGING); - } -}; - -bool Server::is_unlink_pending(CDentry *dn) -{ - CDentry::linkage_t *dnl = dn->get_projected_linkage(); - if (!dnl->is_null() && dn->state_test(CDentry::STATE_UNLINKING)) { - return true; - } - return false; -} - -void Server::wait_for_pending_unlink(CDentry *dn, MDRequestRef& mdr) -{ - dout(20) << __func__ << " dn " << *dn << dendl; - mds->locker->drop_locks(mdr.get()); - auto fin = new C_MDS_RetryRequest(mdcache, mdr); - dn->get(CDentry::PIN_PURGING); - dn->add_waiter(CDentry::WAIT_UNLINK_FINISH, new C_WaitUnlinkToFinish(mdcache, dn, fin)); -} - -struct C_WaitReintegrateToFinish : public MDSContext { -protected: - MDCache *mdcache; - CDentry *dn; - MDSContext *fin; - - MDSRank *get_mds() override - { - ceph_assert(mdcache != NULL); - return mdcache->mds; - } - -public: - C_WaitReintegrateToFinish(MDCache *m, CDentry *d, MDSContext *f) : - mdcache(m), dn(d), fin(f) {} - void finish(int r) override { - fin->complete(r); - dn->put(CDentry::PIN_PURGING); - } -}; - -bool Server::is_reintegrate_pending(CDentry *dn) -{ - CDentry::linkage_t *dnl = dn->get_projected_linkage(); - if (!dnl->is_null() && dn->state_test(CDentry::STATE_REINTEGRATING)) { - return true; - } - return false; -} - -void Server::wait_for_pending_reintegrate(CDentry *dn, MDRequestRef& mdr) -{ - dout(20) << __func__ << " dn " << *dn << dendl; - mds->locker->drop_locks(mdr.get()); - auto fin = new C_MDS_RetryRequest(mdcache, mdr); - dn->get(CDentry::PIN_PURGING); - dn->add_waiter(CDentry::WAIT_REINTEGRATE_FINISH, new C_WaitReintegrateToFinish(mdcache, dn, fin)); -} - // MKNOD class C_MDS_mknod_finish : public ServerLogContext { @@ -6964,11 +6880,6 @@ void Server::handle_client_mknod(MDRequestRef& mdr) if (!dn) return; - if (is_unlink_pending(dn)) { - wait_for_pending_unlink(dn, mdr); - return; - } - CDir *dir = dn->get_dir(); CInode *diri = dir->get_inode(); if (!check_access(mdr, diri, MAY_WRITE)) @@ -7067,11 +6978,6 @@ void Server::handle_client_mkdir(MDRequestRef& mdr) if (!dn) return; - if (is_unlink_pending(dn)) { - wait_for_pending_unlink(dn, mdr); - return; - } - CDir *dir = dn->get_dir(); CInode *diri = dir->get_inode(); @@ -7167,11 +7073,6 @@ void Server::handle_client_symlink(MDRequestRef& mdr) if (!dn) return; - if (is_unlink_pending(dn)) { - wait_for_pending_unlink(dn, mdr); - return; - } - CDir *dir = dn->get_dir(); CInode *diri = dir->get_inode(); @@ -7283,11 +7184,6 @@ void Server::handle_client_link(MDRequestRef& mdr) targeti = ret.second->get_projected_linkage()->get_inode(); } - if (is_unlink_pending(destdn)) { - wait_for_pending_unlink(destdn, mdr); - return; - } - ceph_assert(destdn->get_projected_linkage()->is_null()); if (req->get_alternate_name().size() > alternate_name_max) { dout(10) << " alternate_name longer than " << alternate_name_max << dendl; @@ -7341,14 
+7237,9 @@ void Server::handle_client_link(MDRequestRef& mdr) SnapRealm *target_realm = target_pin->find_snaprealm(); if (target_pin != dir->inode && target_realm->get_subvolume_ino() != - dir->inode->find_snaprealm()->get_subvolume_ino()) { - if (target_pin->is_stray()) { - mds->locker->drop_locks(mdr.get()); - targeti->add_waiter(CInode::WAIT_UNLINK, - new C_MDS_RetryRequest(mdcache, mdr)); - mdlog->flush(); - return; - } + dir->inode->find_snaprealm()->get_subvolume_ino() && + /* The inode is temporarily located in the stray dir pending reintegration */ + !target_pin->is_stray()) { dout(7) << "target is in different subvolume, failing..." << dendl; respond_to_request(mdr, -CEPHFS_EXDEV); return; @@ -7579,17 +7470,11 @@ void Server::_link_remote_finish(MDRequestRef& mdr, bool inc, mdr->apply(); MDRequestRef null_ref; - if (inc) { + if (inc) mdcache->send_dentry_link(dn, null_ref); - } else { - dn->state_clear(CDentry::STATE_UNLINKING); + else mdcache->send_dentry_unlink(dn, NULL, null_ref); - - MDSContext::vec finished; - dn->take_waiting(CDentry::WAIT_UNLINK_FINISH, finished); - mdcache->mds->queue_waiters(finished); - } - + // bump target popularity mds->balancer->hit_inode(targeti, META_POP_IWR); mds->balancer->hit_dir(dn->get_dir(), META_POP_IWR); @@ -7963,25 +7848,10 @@ void Server::handle_client_unlink(MDRequestRef& mdr) if (rmdir) mdr->disable_lock_cache(); - CDentry *dn = rdlock_path_xlock_dentry(mdr, false, true); if (!dn) return; - if (is_reintegrate_pending(dn)) { - wait_for_pending_reintegrate(dn, mdr); - return; - } - - // notify replica MDSes the dentry is under unlink - if (!dn->state_test(CDentry::STATE_UNLINKING)) { - dn->state_set(CDentry::STATE_UNLINKING); - mdcache->send_dentry_unlink(dn, nullptr, mdr, true); - if (dn->replica_unlinking_ref) { - return; - } - } - CDentry::linkage_t *dnl = dn->get_linkage(client, mdr); ceph_assert(!dnl->is_null()); CInode *in = dnl->get_inode(); @@ -7998,13 +7868,11 @@ void Server::handle_client_unlink(MDRequestRef& mdr) if (rmdir) { // do empty directory checks if (_dir_is_nonempty_unlocked(mdr, in)) { - dn->state_clear(CDentry::STATE_UNLINKING); - respond_to_request(mdr, -CEPHFS_ENOTEMPTY); + respond_to_request(mdr, -CEPHFS_ENOTEMPTY); return; } } else { dout(7) << "handle_client_unlink on dir " << *in << ", returning error" << dendl; - dn->state_clear(CDentry::STATE_UNLINKING); respond_to_request(mdr, -CEPHFS_EISDIR); return; } @@ -8012,7 +7880,6 @@ void Server::handle_client_unlink(MDRequestRef& mdr) if (rmdir) { // unlink dout(7) << "handle_client_rmdir on non-dir " << *in << ", returning error" << dendl; - dn->state_clear(CDentry::STATE_UNLINKING); respond_to_request(mdr, -CEPHFS_ENOTDIR); return; } @@ -8020,10 +7887,8 @@ void Server::handle_client_unlink(MDRequestRef& mdr) CInode *diri = dn->get_dir()->get_inode(); if ((!mdr->has_more() || mdr->more()->witnessed.empty())) { - if (!check_access(mdr, diri, MAY_WRITE)) { - dn->state_clear(CDentry::STATE_UNLINKING); + if (!check_access(mdr, diri, MAY_WRITE)) return; - } } // -- create stray dentry? 
-- @@ -8062,7 +7927,6 @@ void Server::handle_client_unlink(MDRequestRef& mdr) if (in->is_dir() && _dir_is_nonempty(mdr, in)) { respond_to_request(mdr, -CEPHFS_ENOTEMPTY); - dn->state_clear(CDentry::STATE_UNLINKING); return; } @@ -8262,14 +8126,9 @@ void Server::_unlink_local_finish(MDRequestRef& mdr, } mdr->apply(); - - dn->state_clear(CDentry::STATE_UNLINKING); + mdcache->send_dentry_unlink(dn, straydn, mdr); - - MDSContext::vec finished; - dn->take_waiting(CDentry::WAIT_UNLINK_FINISH, finished); - mdcache->mds->queue_waiters(finished); - + if (straydn) { // update subtree map? if (strayin->is_dir()) @@ -8284,7 +8143,7 @@ void Server::_unlink_local_finish(MDRequestRef& mdr, // reply respond_to_request(mdr, 0); - + // removing a new dn? dn->get_dir()->try_remove_unlinked_dn(dn); @@ -8743,16 +8602,6 @@ void Server::handle_client_rename(MDRequestRef& mdr) if (!destdn) return; - if (is_unlink_pending(destdn)) { - wait_for_pending_unlink(destdn, mdr); - return; - } - - if (is_unlink_pending(srcdn)) { - wait_for_pending_unlink(srcdn, mdr); - return; - } - dout(10) << " destdn " << *destdn << dendl; CDir *destdir = destdn->get_dir(); ceph_assert(destdir->is_auth()); @@ -9169,12 +9018,6 @@ void Server::handle_client_rename(MDRequestRef& mdr) C_MDS_rename_finish *fin = new C_MDS_rename_finish(this, mdr, srcdn, destdn, straydn); journal_and_reply(mdr, srci, destdn, le, fin); - - // trigger to flush mdlog in case reintegrating or migrating the stray dn, - // because the link requests maybe waiting. - if (srcdn->get_dir()->inode->is_stray()) { - mdlog->flush(); - } mds->balancer->maybe_fragment(destdn->get_dir(), false); } @@ -9708,7 +9551,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C // primary+remote link merge? bool linkmerge = (srcdnl->get_inode() == oldin); if (linkmerge) - ceph_assert(srcdnl->is_primary() || destdnl->is_remote()); + ceph_assert(srcdnl->is_primary() && destdnl->is_remote()); bool new_in_snaprealm = false; bool new_oldin_snaprealm = false; @@ -9785,14 +9628,6 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C srcdn->get_dir()->unlink_inode(srcdn); - // After the stray dn being unlinked from the corresponding inode in case of - // reintegrate_stray/migrate_stray, just wake up the waitiers. - MDSContext::vec finished; - in->take_waiting(CInode::WAIT_UNLINK, finished); - if (!finished.empty()) { - mds->queue_waiters(finished); - } - // dest if (srcdn_was_remote) { if (!linkmerge) { @@ -11372,7 +11207,8 @@ void Server::handle_client_renamesnap(MDRequestRef& mdr) return; } - snapid_t snapid = diri->snaprealm->resolve_snapname(srcname, diri->ino()); + snapid_t snapid = diri->snaprealm->resolve_snapname(srcname, diri->ino()); + dout(10) << " snapname " << srcname << " is " << snapid << dendl; // lock snap @@ -11457,6 +11293,159 @@ void Server::_renamesnap_finish(MDRequestRef& mdr, CInode *diri, snapid_t snapid respond_to_request(mdr, 0); } +void Server::handle_client_readdir_snapdiff(MDRequestRef& mdr) +{ + const cref_t& req = mdr->client_request; + Session* session = mds->get_session(req); + MutationImpl::LockOpVec lov; + CInode* diri = rdlock_path_pin_ref(mdr, false, true); + if (!diri) return; + + // it's a directory, right? 
+ if (!diri->is_dir()) { + // not a dir + dout(10) << "reply to " << *req << " snapdiff -CEPHFS_ENOTDIR" << dendl; + respond_to_request(mdr, -CEPHFS_ENOTDIR); + return; + } + + auto num_caps = session->get_num_caps(); + auto session_cap_acquisition = session->get_cap_acquisition(); + + if (num_caps > static_cast(max_caps_per_client * max_caps_throttle_ratio) && session_cap_acquisition >= cap_acquisition_throttle) { + dout(20) << "snapdiff throttled. max_caps_per_client: " << max_caps_per_client << " num_caps: " << num_caps + << " session_cap_acquistion: " << session_cap_acquisition << " cap_acquisition_throttle: " << cap_acquisition_throttle << dendl; + if (logger) + logger->inc(l_mdss_cap_acquisition_throttle); + + mds->timer.add_event_after(caps_throttle_retry_request_timeout, new C_MDS_RetryRequest(mdcache, mdr)); + return; + } + + lov.add_rdlock(&diri->filelock); + lov.add_rdlock(&diri->dirfragtreelock); + + if (!mds->locker->acquire_locks(mdr, lov)) + return; + + if (!check_access(mdr, diri, MAY_READ)) + return; + + // which frag? + frag_t fg = (__u32)req->head.args.snapdiff.frag; + unsigned req_flags = (__u32)req->head.args.snapdiff.flags; + string offset_str = req->get_path2(); + + __u32 offset_hash = 0; + if (!offset_str.empty()) { + offset_hash = ceph_frag_value(diri->hash_dentry_name(offset_str)); + } else { + offset_hash = (__u32)req->head.args.snapdiff.offset_hash; + } + + dout(10) << " frag " << fg << " offset '" << offset_str << "'" + << " offset_hash " << offset_hash << " flags " << req_flags << dendl; + + // does the frag exist? + if (diri->dirfragtree[fg.value()] != fg) { + frag_t newfg; + if (req_flags & CEPH_READDIR_REPLY_BITFLAGS) { + if (fg.contains((unsigned)offset_hash)) { + newfg = diri->dirfragtree[offset_hash]; + } else { + // client actually wants next frag + newfg = diri->dirfragtree[fg.value()]; + } + } else { + offset_str.clear(); + newfg = diri->dirfragtree[fg.value()]; + } + dout(10) << " adjust frag " << fg << " -> " << newfg << " " << diri->dirfragtree << dendl; + fg = newfg; + } + + CDir* dir = try_open_auth_dirfrag(diri, fg, mdr); + if (!dir) return; + + // ok! + dout(10) << __func__<< " on " << *dir << dendl; + ceph_assert(dir->is_auth()); + + if (!dir->is_complete()) { + if (dir->is_frozen()) { + dout(7) << "dir is frozen " << *dir << dendl; + mds->locker->drop_locks(mdr.get()); + mdr->drop_local_auth_pins(); + dir->add_waiter(CDir::WAIT_UNFREEZE, new C_MDS_RetryRequest(mdcache, mdr)); + return; + } + // fetch + dout(10) << " incomplete dir contents for snapdiff on " << *dir << ", fetching" << dendl; + dir->fetch(new C_MDS_RetryRequest(mdcache, mdr), true); + return; + } + +#ifdef MDS_VERIFY_FRAGSTAT + dir->verify_fragstat(); +#endif + + utime_t now = ceph_clock_now(); + mdr->set_mds_stamp(now); + + mdr->snapid_diff_other = (uint64_t)req->head.args.snapdiff.snap_other; + if (mdr->snapid_diff_other == mdr->snapid || + mdr->snapid == CEPH_NOSNAP || + mdr->snapid_diff_other == CEPH_NOSNAP) { + dout(10) << "reply to " << *req << " snapdiff -CEPHFS_EINVAL" << dendl; + respond_to_request(mdr, -CEPHFS_EINVAL); + } + + dout(10) << __func__ + << " snap " << mdr->snapid + << " vs. snap " << mdr->snapid_diff_other + << dendl; + + SnapRealm* realm = diri->find_snaprealm(); + + unsigned max = req->head.args.snapdiff.max_entries; + if (!max) + max = dir->get_num_any(); // whatever, something big. 
+ unsigned max_bytes = req->head.args.snapdiff.max_bytes; + if (!max_bytes) + // make sure at least one item can be encoded + max_bytes = (512 << 10) + g_conf()->mds_max_xattr_pairs_size; + + // start final blob + bufferlist dirbl; + DirStat ds; + ds.frag = dir->get_frag(); + ds.auth = dir->get_dir_auth().first; + if (dir->is_auth() && !forward_all_requests_to_auth) + dir->get_dist_spec(ds.dist, mds->get_nodeid()); + + dir->encode_dirstat(dirbl, mdr->session->info, ds); + + // count bytes available. + // this isn't perfect, but we should capture the main variable/unbounded size items! + int front_bytes = dirbl.length() + sizeof(__u32) + sizeof(__u8) * 2; + int bytes_left = max_bytes - front_bytes; + bytes_left -= get_snap_trace(session, realm).length(); + + _readdir_diff( + now, + mdr, + diri, + dir, + realm, + max, + bytes_left, + offset_str, + offset_hash, + req_flags, + dirbl); +} + + /** * Return true if server is in state RECONNECT and this * client has not yet reconnected. @@ -11487,3 +11476,265 @@ const bufferlist& Server::get_snap_trace(client_t client, SnapRealm *realm) cons Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(client.v)); return get_snap_trace(session, realm); } + +void Server::_readdir_diff( + utime_t now, + MDRequestRef& mdr, + CInode* diri, + CDir* dir, + SnapRealm* realm, + unsigned max_entries, + int bytes_left, + const string& offset_str, + uint32_t offset_hash, + unsigned req_flags, + bufferlist& dirbl) +{ + // build dir contents + bufferlist dnbl; + __u32 numfiles = 0; + + snapid_t snapid = mdr->snapid; + snapid_t snapid_prev = mdr->snapid_diff_other; + if (snapid < snapid_prev) { + std::swap(snapid, snapid_prev); + } + bool from_the_beginning = !offset_hash && offset_str.empty(); + // skip all dns < dentry_key_t(snapid, offset_str, offset_hash) + dentry_key_t skip_key(snapid_prev, offset_str.c_str(), offset_hash); + + bool end = build_snap_diff( + mdr, + dir, + bytes_left, + from_the_beginning ? nullptr : & skip_key, + snapid_prev, + snapid, + dnbl, + [&](CDentry* dn, CInode* in, bool exists) { + string name; + snapid_t effective_snapid; + const auto& dn_name = dn->get_name(); + // provide the first snapid for removed entries and + // the last one for existent ones + effective_snapid = exists ? 
snapid : snapid_prev; + name.append(dn_name); + if ((int)(dnbl.length() + name.length() + sizeof(__u32) + sizeof(LeaseStat)) > bytes_left) { + dout(10) << " ran out of room, stopping at " << dnbl.length() << " < " << bytes_left << dendl; + return false; + } + + auto diri = dir->get_inode(); + auto hash = ceph_frag_value(diri->hash_dentry_name(dn_name)); + unsigned start_len = dnbl.length(); + dout(10) << "inc dn " << *dn << " as " << name + << std::hex << " hash 0x" << hash << std::dec + << dendl; + encode(name, dnbl); + mds->locker->issue_client_lease(dn, in, mdr, now, dnbl); + + // inode + dout(10) << "inc inode " << *in << " snap " << effective_snapid << dendl; + int r = in->encode_inodestat(dnbl, mdr->session, realm, effective_snapid, bytes_left - (int)dnbl.length()); + if (r < 0) { + // chop off dn->name, lease + dout(10) << " ran out of room, stopping at " + << start_len << " < " << bytes_left << dendl; + bufferlist keep; + keep.substr_of(dnbl, 0, start_len); + dnbl.swap(keep); + return false; + } + + // touch dn + mdcache->lru.lru_touch(dn); + ++numfiles; + return true; + }); + + __u16 flags = 0; + if (req_flags & CEPH_READDIR_REPLY_BITFLAGS) { + flags |= CEPH_READDIR_HASH_ORDER | CEPH_READDIR_OFFSET_HASH; + } + + std::swap(mdr->snapid, mdr->snapid_diff_other); // we want opponent snapid to be used for tracei + + _finalize_readdir(mdr, diri, dir, from_the_beginning, end, flags, numfiles, + dirbl, dnbl); +} + +bool Server::build_snap_diff( + MDRequestRef& mdr, + CDir* dir, + int bytes_left, + dentry_key_t* skip_key, + snapid_t snapid_prev, + snapid_t snapid, + const bufferlist& dnbl, + std::function add_result_cb) +{ + client_t client = mdr->client_request->get_source().num(); + + struct EntryInfo { + CDentry* dn = nullptr; + CInode* in = nullptr; + utime_t mtime; + + void reset() { + *this = EntryInfo(); + } + } before; + + auto insert_deleted = [&](EntryInfo& ei) { + dout(20) << "build_snap_diff deleted file " << ei.dn->get_name() << " " + << ei.dn->first << "/" << ei.dn->last << dendl; + int r = add_result_cb(ei.dn, ei.in, false); + ei.reset(); + return r; + }; + + auto it = !skip_key ? dir->begin() : dir->lower_bound(*skip_key); + + while(it != dir->end()) { + CDentry* dn = it->second; + dout(20) << __func__ << " " << it->first << "->" << *dn << dendl; + ++it; + if (dn->state_test(CDentry::STATE_PURGING)) + continue; + + bool dnp = dn->use_projected(client, mdr); + CDentry::linkage_t* dnl = dnp ? dn->get_projected_linkage() : dn->get_linkage(); + + if (dnl->is_null()) { + dout(20) << __func__ << " linkage is null, skipping" << dendl; + continue; + } + + if (dn->last < snapid_prev || dn->first > snapid) { + dout(20) << __func__ << " not in range, skipping" << dendl; + continue; + } + if (skip_key) { + skip_key->snapid = dn->last; + if (!(*skip_key < dn->key())) + continue; + } + + CInode* in = dnl->get_inode(); + if (in && in->ino() == CEPH_INO_CEPH) + continue; + + // remote link? + // better for the MDS to do the work, if we think the client will stat any of these files. 
+ if (dnl->is_remote() && !in) { + in = mdcache->get_inode(dnl->get_remote_ino()); + dout(20) << __func__ << " remote in: " << *in << " ino " << std::hex << dnl->get_remote_ino() << std::dec << dendl; + if (in) { + dn->link_remote(dnl, in); + } else if (dn->state_test(CDentry::STATE_BADREMOTEINO)) { + dout(10) << "skipping bad remote ino on " << *dn << dendl; + continue; + } else { + // touch everything i _do_ have + for (auto& p : *dir) { + if (!p.second->get_linkage()->is_null()) + mdcache->lru.lru_touch(p.second); + } + + // already issued caps and leases, reply immediately. + if (dnbl.length() > 0) { + mdcache->open_remote_dentry(dn, dnp, new C_MDSInternalNoop); + dout(10) << " open remote dentry after caps were issued, stopping at " + << dnbl.length() << " < " << bytes_left << dendl; + } else { + mds->locker->drop_locks(mdr.get()); + mdr->drop_local_auth_pins(); + mdcache->open_remote_dentry(dn, dnp, new C_MDS_RetryRequest(mdcache, mdr)); + } + return false; + } + } + ceph_assert(in); + + utime_t mtime = in->get_inode()->mtime; + + if (in->is_dir()) { + + // we need to maintain the order of entries (determined by their name hashes) + // hence need to insert the previous entry if any immediately. + if (before.dn) { + if (!insert_deleted(before)) { + break; + } + } + + bool exists = true; + if (snapid_prev < dn->first && dn->last < snapid) { + dout(20) << __func__ << " skipping inner " << dn->get_name() << " " + << dn->first << "/" << dn->last << dendl; + continue; + } else if (dn->first <= snapid_prev && dn->last < snapid) { + // dir deleted + dout(20) << __func__ << " deleted dir " << dn->get_name() << " " + << dn->first << "/" << dn->last << dendl; + exists = false; + } + bool r = add_result_cb(dn, in, exists); + if (!r) { + break; + } + } else { + if (snapid_prev >= dn->first && snapid <= dn->last) { + dout(20) << __func__ << " skipping unchanged " << dn->get_name() << " " + << dn->first << "/" << dn->last << dendl; + continue; + } else if (snapid_prev < dn->first && snapid > dn->last) { + dout(20) << __func__ << " skipping inner modification " << dn->get_name() << " " + << dn->first << "/" << dn->last << dendl; + continue; + } + string_view name_before = + before.dn ? string_view(before.dn->get_name()) : string_view(); + if (before.dn && dn->get_name() != name_before) { + if (!insert_deleted(before)) { + break; + } + before.reset(); + } + if (snapid_prev >= dn->first && snapid_prev <= dn->last) { + dout(30) << __func__ << " dn_before " << dn->get_name() << " " + << dn->first << "/" << dn->last << dendl; + before = EntryInfo {dn, in, mtime}; + continue; + } else { + if (before.dn && dn->get_name() == name_before) { + if (mtime == before.mtime) { + dout(30) << __func__ << " timestamp not changed " << dn->get_name() << " " + << dn->first << "/" << dn->last + << " " << mtime + << dendl; + before.reset(); + continue; + } else { + dout(30) << __func__ << " timestamp changed " << dn->get_name() << " " + << dn->first << "/" << dn->last + << " " << before.mtime << " vs. 
" << mtime + << dendl; + before.reset(); + } + } + dout(20) << __func__ << " new file " << dn->get_name() << " " + << dn->first << "/" << dn->last + << dendl; + ceph_assert(snapid >= dn->first && snapid <= dn->last); + } + if (!add_result_cb(dn, in, true)) { + break; + } + } + } + if (before.dn) { + insert_deleted(before); + } + return it == dir->end(); +} diff --git a/ceph/src/mds/Server.h b/ceph/src/mds/Server.h index a269d6cb4..81a5933ba 100644 --- a/ceph/src/mds/Server.h +++ b/ceph/src/mds/Server.h @@ -68,6 +68,7 @@ enum { l_mdss_req_readdir_latency, l_mdss_req_rename_latency, l_mdss_req_renamesnap_latency, + l_mdss_req_snapdiff_latency, l_mdss_req_rmdir_latency, l_mdss_req_rmsnap_latency, l_mdss_req_rmxattr_latency, @@ -158,7 +159,6 @@ public: // -- requests -- void handle_client_request(const cref_t &m); - void handle_client_reply(const cref_t &m); void journal_and_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn, LogEvent *le, MDSLogContextBase *fin); @@ -236,12 +236,6 @@ public: void handle_client_fsync(MDRequestRef& mdr); - bool is_unlink_pending(CDentry *dn); - void wait_for_pending_unlink(CDentry *dn, MDRequestRef& mdr); - - bool is_reintegrate_pending(CDentry *dn); - void wait_for_pending_reintegrate(CDentry *dn, MDRequestRef& mdr); - // open void handle_client_open(MDRequestRef& mdr); void handle_client_openc(MDRequestRef& mdr); // O_CREAT variant. @@ -299,6 +293,7 @@ public: void _rmsnap_finish(MDRequestRef& mdr, CInode *diri, snapid_t snapid); void handle_client_renamesnap(MDRequestRef& mdr); void _renamesnap_finish(MDRequestRef& mdr, CInode *diri, snapid_t snapid); + void handle_client_readdir_snapdiff(MDRequestRef& mdr); // helpers bool _rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, std::set &witnesse, @@ -480,6 +475,37 @@ private: void reply_client_request(MDRequestRef& mdr, const ref_t &reply); void flush_session(Session *session, MDSGatherBuilder& gather); + void _finalize_readdir(MDRequestRef& mdr, + CInode *diri, + CDir* dir, + bool start, + bool end, + __u16 flags, + __u32 numfiles, + bufferlist& dirbl, + bufferlist& dnbl); + void _readdir_diff( + utime_t now, + MDRequestRef& mdr, + CInode* diri, + CDir* dir, + SnapRealm* realm, + unsigned max_entries, + int bytes_left, + const std::string& offset_str, + uint32_t offset_hash, + unsigned req_flags, + bufferlist& dirbl); + bool build_snap_diff( + MDRequestRef& mdr, + CDir* dir, + int bytes_left, + dentry_key_t* skip_key, + snapid_t snapid_before, + snapid_t snapid, + const bufferlist& dnbl, + std::function add_result_cb); + MDSRank *mds; MDCache *mdcache; MDLog *mdlog; diff --git a/ceph/src/mds/SessionMap.cc b/ceph/src/mds/SessionMap.cc index 2364c973e..720396338 100644 --- a/ceph/src/mds/SessionMap.cc +++ b/ceph/src/mds/SessionMap.cc @@ -45,6 +45,11 @@ class SessionMapIOContext : public MDSIOContextBase }; }; +SessionMap::SessionMap(MDSRank *m) + : mds(m), + mds_session_metadata_threshold(g_conf().get_val("mds_session_metadata_threshold")) { +} + void SessionMap::register_perfcounters() { PerfCountersBuilder plb(g_ceph_context, "mds_sessions", @@ -66,6 +71,8 @@ void SessionMap::register_perfcounters() plb.add_u64(l_mdssm_avg_load, "average_load", "Average Load"); plb.add_u64(l_mdssm_avg_session_uptime, "avg_session_uptime", "Average session uptime"); + plb.add_u64(l_mdssm_metadata_threshold_sessions_evicted, "mdthresh_evicted", + "Sessions evicted on reaching metadata threshold"); logger = plb.create_perf_counters(); g_ceph_context->get_perfcounters_collection()->add(logger); @@ -375,6 +382,11 @@ 
public: }; } +bool SessionMap::validate_and_encode_session(MDSRank *mds, Session *session, bufferlist& bl) { + session->info.encode(bl, mds->mdsmap->get_up_features()); + return bl.length() < mds_session_metadata_threshold; +} + void SessionMap::save(MDSContext *onsave, version_t needv) { dout(10) << __func__ << ": needv " << needv << ", v " << version << dendl; @@ -410,6 +422,7 @@ void SessionMap::save(MDSContext *onsave, version_t needv) dout(20) << " updating keys:" << dendl; map to_set; + std::set to_blocklist; for(std::set::iterator i = dirty_sessions.begin(); i != dirty_sessions.end(); ++i) { const entity_name_t name = *i; @@ -420,13 +433,19 @@ void SessionMap::save(MDSContext *onsave, version_t needv) session->is_stale() || session->is_killing()) { dout(20) << " " << name << dendl; - // Serialize K - CachedStackStringStream css; - *css << name; // Serialize V bufferlist bl; - session->info.encode(bl, mds->mdsmap->get_up_features()); + if (!validate_and_encode_session(mds, session, bl)) { + derr << __func__ << ": session (" << name << ") exceeds" + << " sesion metadata threshold - blocklisting" << dendl; + to_blocklist.emplace(name); + continue; + } + + // Serialize K + CachedStackStringStream css; + *css << name; // Add to RADOS op to_set[std::string(css->strv())] = bl; @@ -461,6 +480,8 @@ void SessionMap::save(MDSContext *onsave, version_t needv) 0, new C_OnFinisher(new C_IO_SM_Save(this, version), mds->finisher)); + apply_blocklist(to_blocklist); + logger->inc(l_mdssm_metadata_threshold_sessions_evicted, to_blocklist.size()); } void SessionMap::_save_finish(version_t v) @@ -823,7 +844,8 @@ void SessionMap::save_if_dirty(const std::set &tgt_sessions, { ceph_assert(gather_bld != NULL); - std::vector write_sessions; + std::set to_blocklist; + std::map write_sessions; // Decide which sessions require a write for (std::set::iterator i = tgt_sessions.begin(); @@ -848,13 +870,24 @@ void SessionMap::save_if_dirty(const std::set &tgt_sessions, // need to pre-empt that. continue; } + + // Serialize V + bufferlist bl; + if (!validate_and_encode_session(mds, session, bl)) { + derr << __func__ << ": session (" << session_id << ") exceeds" + << " sesion metadata threshold - blocklisting" << dendl; + to_blocklist.emplace(session_id); + continue; + } + // Okay, passed all our checks, now we write // this session out. The version we write // into the OMAP may now be higher-versioned // than the version in the header, but that's // okay because it's never a problem to have // an overly-fresh copy of a session. - write_sessions.push_back(*i); + write_sessions.emplace(session_id, std::move(bl)); + session->clear_dirty_completed_requests(); } dout(4) << __func__ << ": writing " << write_sessions.size() << dendl; @@ -862,21 +895,15 @@ void SessionMap::save_if_dirty(const std::set &tgt_sessions, // Batch writes into mds_sessionmap_keys_per_op const uint32_t kpo = g_conf()->mds_sessionmap_keys_per_op; map to_set; - for (uint32_t i = 0; i < write_sessions.size(); ++i) { - const entity_name_t &session_id = write_sessions[i]; - Session *session = session_map[session_id]; - session->clear_dirty_completed_requests(); + uint32_t i = 0; + for (auto &[session_id, bl] : write_sessions) { // Serialize K CachedStackStringStream css; *css << session_id; - // Serialize V - bufferlist bl; - session->info.encode(bl, mds->mdsmap->get_up_features()); - // Add to RADOS op - to_set[css->str()] = bl; + to_set[css->str()] = std::move(bl); // Complete this write transaction? 
if (i == write_sessions.size() - 1 @@ -895,7 +922,11 @@ void SessionMap::save_if_dirty(const std::set &tgt_sessions, new C_IO_SM_Save_One(this, on_safe), mds->finisher)); } + ++i; } + + apply_blocklist(to_blocklist); + logger->inc(l_mdssm_metadata_threshold_sessions_evicted, to_blocklist.size()); } // ================= @@ -1109,6 +1140,10 @@ void SessionMap::handle_conf_change(const std::set& changed) }; apply_to_open_sessions(mut); } + + if (changed.count("mds_session_metadata_threshold")) { + mds_session_metadata_threshold = g_conf().get_val("mds_session_metadata_threshold"); + } } void SessionMap::update_average_session_age() { @@ -1120,6 +1155,20 @@ void SessionMap::update_average_session_age() { logger->set(l_mdssm_avg_session_uptime, (uint64_t)avg_uptime); } +void SessionMap::apply_blocklist(const std::set& victims) { + if (victims.empty()) { + return; + } + + C_GatherBuilder gather(g_ceph_context, new C_MDSInternalNoop); + for (auto &victim : victims) { + CachedStackStringStream css; + mds->evict_client(victim.num(), false, g_conf()->mds_session_blocklist_on_evict, *css, + gather.new_sub()); + } + gather.activate(); +} + int SessionFilter::parse( const std::vector &args, std::ostream *ss) diff --git a/ceph/src/mds/SessionMap.h b/ceph/src/mds/SessionMap.h index e59f7f264..ddf227be9 100644 --- a/ceph/src/mds/SessionMap.h +++ b/ceph/src/mds/SessionMap.h @@ -45,6 +45,7 @@ enum { l_mdssm_total_load, l_mdssm_avg_load, l_mdssm_avg_session_uptime, + l_mdssm_metadata_threshold_sessions_evicted, l_mdssm_last, }; @@ -589,7 +590,7 @@ protected: class SessionMap : public SessionMapStore { public: SessionMap() = delete; - explicit SessionMap(MDSRank *m) : mds(m) {} + explicit SessionMap(MDSRank *m); ~SessionMap() override { @@ -838,6 +839,11 @@ private: } time avg_birth_time = clock::zero(); + + size_t mds_session_metadata_threshold; + + bool validate_and_encode_session(MDSRank *mds, Session *session, bufferlist& bl); + void apply_blocklist(const std::set& victims); }; std::ostream& operator<<(std::ostream &out, const Session &s); diff --git a/ceph/src/mds/SimpleLock.cc b/ceph/src/mds/SimpleLock.cc index 76448ee9d..b23915f94 100644 --- a/ceph/src/mds/SimpleLock.cc +++ b/ceph/src/mds/SimpleLock.cc @@ -31,13 +31,14 @@ void SimpleLock::dump(ceph::Formatter *f) const { f->close_section(); f->dump_string("state", get_state_name(get_state())); + f->dump_string("type", get_lock_type_name(get_type())); f->dump_bool("is_leased", is_leased()); f->dump_int("num_rdlocks", get_num_rdlocks()); f->dump_int("num_wrlocks", get_num_wrlocks()); f->dump_int("num_xlocks", get_num_xlocks()); f->open_object_section("xlock_by"); - if (get_xlock_by()) { - get_xlock_by()->dump(f); + if (auto mut = get_xlock_by(); mut) { + f->dump_object("reqid", mut->reqid); } f->close_section(); } diff --git a/ceph/src/mds/SimpleLock.h b/ceph/src/mds/SimpleLock.h index 725c4488c..2a7a5fc80 100644 --- a/ceph/src/mds/SimpleLock.h +++ b/ceph/src/mds/SimpleLock.h @@ -417,6 +417,7 @@ public: ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE || state == LOCK_XLOCKSNAP || state == LOCK_LOCK_XLOCK || state == LOCK_LOCK || /* if we are a leader of a peer */ + state == LOCK_PREXLOCK || state == LOCK_SYNC || is_locallock()); --more()->num_xlock; parent->put(MDSCacheObject::PIN_LOCK); diff --git a/ceph/src/mds/SnapRealm.cc b/ceph/src/mds/SnapRealm.cc index c7a7d75bc..9d303bcb0 100644 --- a/ceph/src/mds/SnapRealm.cc +++ b/ceph/src/mds/SnapRealm.cc @@ -255,7 +255,7 @@ snapid_t SnapRealm::resolve_snapname(std::string_view n, inodeno_t atino, 
snapid //if (num && p->second.snapid == num) //return p->first; if (actual && p->second.name == n) - return p->first; + return p->first; if (!actual && p->second.name == pname && p->second.ino == pino) return p->first; } diff --git a/ceph/src/mds/StrayManager.cc b/ceph/src/mds/StrayManager.cc index aa4c95e79..d288ce661 100644 --- a/ceph/src/mds/StrayManager.cc +++ b/ceph/src/mds/StrayManager.cc @@ -20,6 +20,7 @@ #include "mds/MDLog.h" #include "mds/CDir.h" #include "mds/CDentry.h" +#include "mds/ScrubStack.h" #include "events/EUpdate.h" #include "messages/MClientRequest.h" @@ -281,17 +282,6 @@ void StrayManager::_purge_stray_logged(CDentry *dn, version_t pdv, MutationRef& dir->remove_dentry(dn); } - // Once we are here normally the waiter list are mostly empty - // but in corner case that the clients pass a invalidate ino, - // which maybe under unlinking, the link caller will add the - // request to the waiter list. We need try to wake them up - // anyway. - MDSContext::vec finished; - in->take_waiting(CInode::WAIT_UNLINK, finished); - if (!finished.empty()) { - mds->queue_waiters(finished); - } - // drop inode inodeno_t ino = in->ino(); if (in->is_dirty()) @@ -311,6 +301,11 @@ void StrayManager::enqueue(CDentry *dn, bool trunc) CInode *in = dnl->get_inode(); ceph_assert(in); + //remove inode from scrub stack if it is being purged + if(mds->scrubstack->remove_inode_if_stacked(in)) { + dout(20) << "removed " << *in << " from the scrub stack" << dendl; + } + /* We consider a stray to be purging as soon as it is enqueued, to avoid * enqueing it twice */ dn->state_set(CDentry::STATE_PURGING); @@ -681,27 +676,19 @@ void StrayManager::reintegrate_stray(CDentry *straydn, CDentry *rdn) dout(10) << __func__ << " " << *straydn << " to " << *rdn << dendl; logger->inc(l_mdc_strays_reintegrated); - + // rename it to remote linkage . filepath src(straydn->get_name(), straydn->get_dir()->ino()); filepath dst(rdn->get_name(), rdn->get_dir()->ino()); - ceph_tid_t tid = mds->issue_tid(); - auto req = make_message(CEPH_MDS_OP_RENAME); req->set_filepath(dst); req->set_filepath2(src); - req->set_tid(tid); - - rdn->state_set(CDentry::STATE_REINTEGRATING); - mds->internal_client_requests.emplace(std::piecewise_construct, - std::make_tuple(tid), - std::make_tuple(CEPH_MDS_OP_RENAME, - rdn, tid)); + req->set_tid(mds->issue_tid()); mds->send_message_mds(req, rdn->authority().first); } - + void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to) { dout(10) << __func__ << " " << *dn << " to mds." 
<< to << dendl; @@ -715,17 +702,10 @@ void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to) filepath src(dn->get_name(), dirino); filepath dst(dn->get_name(), MDS_INO_STRAY(to, MDS_INO_STRAY_INDEX(dirino))); - ceph_tid_t tid = mds->issue_tid(); - auto req = make_message(CEPH_MDS_OP_RENAME); req->set_filepath(dst); req->set_filepath2(src); - req->set_tid(tid); - - mds->internal_client_requests.emplace(std::piecewise_construct, - std::make_tuple(tid), - std::make_tuple(CEPH_MDS_OP_RENAME, - nullptr, tid)); + req->set_tid(mds->issue_tid()); mds->send_message_mds(req, to); } diff --git a/ceph/src/mds/cephfs_features.cc b/ceph/src/mds/cephfs_features.cc index a19ff80ac..4a864076b 100644 --- a/ceph/src/mds/cephfs_features.cc +++ b/ceph/src/mds/cephfs_features.cc @@ -29,6 +29,7 @@ static const std::array feature_names "op_getvxattr", "32bits_retry_fwd", "new_snaprealm_info", + "has_owner_uidgid", }; static_assert(feature_names.size() == CEPHFS_FEATURE_MAX + 1); diff --git a/ceph/src/mds/cephfs_features.h b/ceph/src/mds/cephfs_features.h index 9c16388ec..7d215e2a3 100644 --- a/ceph/src/mds/cephfs_features.h +++ b/ceph/src/mds/cephfs_features.h @@ -47,7 +47,8 @@ namespace ceph { #define CEPHFS_FEATURE_OP_GETVXATTR 17 #define CEPHFS_FEATURE_32BITS_RETRY_FWD 18 #define CEPHFS_FEATURE_NEW_SNAPREALM_INFO 19 -#define CEPHFS_FEATURE_MAX 19 +#define CEPHFS_FEATURE_HAS_OWNER_UIDGID 20 +#define CEPHFS_FEATURE_MAX 20 #define CEPHFS_FEATURES_ALL { \ 0, 1, 2, 3, 4, \ @@ -67,7 +68,8 @@ namespace ceph { CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \ CEPHFS_FEATURE_OP_GETVXATTR, \ CEPHFS_FEATURE_32BITS_RETRY_FWD, \ - CEPHFS_FEATURE_NEW_SNAPREALM_INFO \ + CEPHFS_FEATURE_NEW_SNAPREALM_INFO, \ + CEPHFS_FEATURE_HAS_OWNER_UIDGID, \ } #define CEPHFS_METRIC_FEATURES_ALL { \ diff --git a/ceph/src/mds/mdstypes.cc b/ceph/src/mds/mdstypes.cc index a914b9f30..044c33459 100644 --- a/ceph/src/mds/mdstypes.cc +++ b/ceph/src/mds/mdstypes.cc @@ -771,6 +771,10 @@ void mds_table_pending_t::generate_test_instances(std::listtid = 35434; } +void metareqid_t::dump(ceph::Formatter* f) const { + f->dump_object("entity", name); + f->dump_unsigned("tid", tid); +} /* * inode_load_vec_t diff --git a/ceph/src/mds/mdstypes.h b/ceph/src/mds/mdstypes.h index 66ad7944d..3381d44c9 100644 --- a/ceph/src/mds/mdstypes.h +++ b/ceph/src/mds/mdstypes.h @@ -621,6 +621,7 @@ struct metareqid_t { decode(name, p); decode(tid, p); } + void dump(ceph::Formatter *f) const; entity_name_t name; uint64_t tid = 0; diff --git a/ceph/src/messages/MClientRequest.h b/ceph/src/messages/MClientRequest.h index d8cec3153..f63657d2e 100644 --- a/ceph/src/messages/MClientRequest.h +++ b/ceph/src/messages/MClientRequest.h @@ -38,6 +38,7 @@ #include "include/filepath.h" #include "mds/mdstypes.h" #include "include/ceph_features.h" +#include "mds/cephfs_features.h" #include "messages/MMDSOp.h" #include @@ -73,7 +74,7 @@ private: public: mutable struct ceph_mds_request_head head; /* XXX HACK! 
*/ utime_t stamp; - bool peer_old_version = false; + feature_bitset_t mds_features; struct Release { mutable ceph_mds_request_release item; @@ -113,12 +114,16 @@ protected: MClientRequest() : MMDSOp(CEPH_MSG_CLIENT_REQUEST, HEAD_VERSION, COMPAT_VERSION) { memset(&head, 0, sizeof(head)); + head.owner_uid = -1; + head.owner_gid = -1; } - MClientRequest(int op, bool over=true) + MClientRequest(int op, feature_bitset_t features = 0) : MMDSOp(CEPH_MSG_CLIENT_REQUEST, HEAD_VERSION, COMPAT_VERSION) { memset(&head, 0, sizeof(head)); head.op = op; - peer_old_version = over; + mds_features = features; + head.owner_uid = -1; + head.owner_gid = -1; } ~MClientRequest() final {} @@ -201,6 +206,8 @@ public: int get_op() const { return head.op; } unsigned get_caller_uid() const { return head.caller_uid; } unsigned get_caller_gid() const { return head.caller_gid; } + unsigned get_owner_uid() const { return head.owner_uid; } + unsigned get_owner_gid() const { return head.owner_gid; } const std::vector& get_caller_gid_list() const { return gid_list; } const std::string& get_path() const { return path.get_path(); } @@ -227,6 +234,12 @@ public: copy_from_legacy_head(&head, &old_mds_head); head.version = 0; + head.ext_num_retry = head.num_retry; + head.ext_num_fwd = head.num_fwd; + + head.owner_uid = head.caller_uid; + head.owner_gid = head.caller_gid; + /* Can't set the btime from legacy struct */ if (head.op == CEPH_MDS_OP_SETATTR) { int localmask = head.args.setattr.mask; @@ -262,14 +275,16 @@ public: * client will just copy the 'head' memory and isn't * that smart to skip them. */ - if (peer_old_version) { + if (!mds_features.test(CEPHFS_FEATURE_32BITS_RETRY_FWD)) { head.version = 1; + } else if (!mds_features.test(CEPHFS_FEATURE_HAS_OWNER_UIDGID)) { + head.version = 2; } else { head.version = CEPH_MDS_REQUEST_HEAD_VERSION; } if (features & CEPH_FEATURE_FS_BTIME) { - encode(head, payload, peer_old_version); + encode(head, payload); } else { struct ceph_mds_request_head_legacy old_mds_head; @@ -292,6 +307,10 @@ public: out << "client_request(" << get_orig_source() << ":" << get_tid() << " " << ceph_mds_op_name(get_op()); + if (IS_CEPH_MDS_OP_NEWINODE(head.op)) { + out << " owner_uid=" << head.owner_uid + << ", owner_gid=" << head.owner_gid; + } if (head.op == CEPH_MDS_OP_GETATTR) out << " " << ccap_string(head.args.getattr.mask); if (head.op == CEPH_MDS_OP_SETATTR) { diff --git a/ceph/src/messages/MDentryUnlink.h b/ceph/src/messages/MDentryUnlink.h index fc5228441..210fa033c 100644 --- a/ceph/src/messages/MDentryUnlink.h +++ b/ceph/src/messages/MDentryUnlink.h @@ -22,17 +22,15 @@ class MDentryUnlink final : public MMDSOp { private: - static constexpr int HEAD_VERSION = 2; + static constexpr int HEAD_VERSION = 1; static constexpr int COMPAT_VERSION = 1; - + dirfrag_t dirfrag; std::string dn; - bool unlinking = false; public: dirfrag_t get_dirfrag() const { return dirfrag; } const std::string& get_dn() const { return dn; } - bool is_unlinking() const { return unlinking; } ceph::buffer::list straybl; ceph::buffer::list snapbl; @@ -40,9 +38,10 @@ private: protected: MDentryUnlink() : MMDSOp(MSG_MDS_DENTRYUNLINK, HEAD_VERSION, COMPAT_VERSION) { } - MDentryUnlink(dirfrag_t df, std::string_view n, bool u=false) : + MDentryUnlink(dirfrag_t df, std::string_view n) : MMDSOp(MSG_MDS_DENTRYUNLINK, HEAD_VERSION, COMPAT_VERSION), - dirfrag(df), dn(n), unlinking(u) {} + dirfrag(df), + dn(n) {} ~MDentryUnlink() final {} public: @@ -50,66 +49,19 @@ public: void print(std::ostream& o) const override { o << "dentry_unlink(" << 
dirfrag << " " << dn << ")"; } - + void decode_payload() override { using ceph::decode; auto p = payload.cbegin(); decode(dirfrag, p); decode(dn, p); decode(straybl, p); - if (header.version >= 2) - decode(unlinking, p); } void encode_payload(uint64_t features) override { using ceph::encode; encode(dirfrag, payload); encode(dn, payload); encode(straybl, payload); - encode(unlinking, payload); - } -private: - template - friend boost::intrusive_ptr ceph::make_message(Args&&... args); - template - friend MURef crimson::make_message(Args&&... args); -}; - -class MDentryUnlinkAck final : public MMDSOp { -private: - static constexpr int HEAD_VERSION = 1; - static constexpr int COMPAT_VERSION = 1; - - dirfrag_t dirfrag; - std::string dn; - - public: - dirfrag_t get_dirfrag() const { return dirfrag; } - const std::string& get_dn() const { return dn; } - -protected: - MDentryUnlinkAck() : - MMDSOp(MSG_MDS_DENTRYUNLINK_ACK, HEAD_VERSION, COMPAT_VERSION) { } - MDentryUnlinkAck(dirfrag_t df, std::string_view n) : - MMDSOp(MSG_MDS_DENTRYUNLINK_ACK, HEAD_VERSION, COMPAT_VERSION), - dirfrag(df), dn(n) {} - ~MDentryUnlinkAck() final {} - -public: - std::string_view get_type_name() const override { return "dentry_unlink_ack";} - void print(std::ostream& o) const override { - o << "dentry_unlink_ack(" << dirfrag << " " << dn << ")"; - } - - void decode_payload() override { - using ceph::decode; - auto p = payload.cbegin(); - decode(dirfrag, p); - decode(dn, p); - } - void encode_payload(uint64_t features) override { - using ceph::encode; - encode(dirfrag, payload); - encode(dn, payload); } private: template diff --git a/ceph/src/mgr/ActivePyModules.cc b/ceph/src/mgr/ActivePyModules.cc index c007509f9..45038e734 100644 --- a/ceph/src/mgr/ActivePyModules.cc +++ b/ceph/src/mgr/ActivePyModules.cc @@ -1511,13 +1511,13 @@ void ActivePyModules::cluster_log(const std::string &channel, clog_type prio, cl->do_log(prio, message); } -void ActivePyModules::register_client(std::string_view name, std::string addrs) +void ActivePyModules::register_client(std::string_view name, std::string addrs, bool replace) { entity_addrvec_t addrv; addrv.parse(addrs.data()); - dout(7) << "registering msgr client handle " << addrv << dendl; - py_module_registry.register_client(name, std::move(addrv)); + dout(7) << "registering msgr client handle " << addrv << " (replace=" << replace << ")" << dendl; + py_module_registry.register_client(name, std::move(addrv), replace); } void ActivePyModules::unregister_client(std::string_view name, std::string addrs) diff --git a/ceph/src/mgr/ActivePyModules.h b/ceph/src/mgr/ActivePyModules.h index 4b180942d..283f96a6e 100644 --- a/ceph/src/mgr/ActivePyModules.h +++ b/ceph/src/mgr/ActivePyModules.h @@ -158,7 +158,7 @@ public: void clear_all_progress_events(); void get_progress_events(std::map* events); - void register_client(std::string_view name, std::string addrs); + void register_client(std::string_view name, std::string addrs, bool replace); void unregister_client(std::string_view name, std::string addrs); void config_notify(); diff --git a/ceph/src/mgr/BaseMgrModule.cc b/ceph/src/mgr/BaseMgrModule.cc index 4fb5b250b..ab64ac39f 100644 --- a/ceph/src/mgr/BaseMgrModule.cc +++ b/ceph/src/mgr/BaseMgrModule.cc @@ -1388,12 +1388,15 @@ ceph_is_authorized(BaseMgrModule *self, PyObject *args) static PyObject* ceph_register_client(BaseMgrModule *self, PyObject *args) { - char *addrs = nullptr; - if (!PyArg_ParseTuple(args, "s:ceph_register_client", &addrs)) { + const char* _name = nullptr; + char* addrs = 
nullptr; + int replace = 0; + if (!PyArg_ParseTuple(args, "zsp:ceph_register_client", &_name, &addrs, &replace)) { return nullptr; } + auto name = _name ? std::string(_name) : std::string(self->this_module->get_name()); without_gil([&] { - self->py_modules->register_client(self->this_module->get_name(), addrs); + self->py_modules->register_client(name, addrs, replace); }); Py_RETURN_NONE; } @@ -1401,12 +1404,14 @@ ceph_register_client(BaseMgrModule *self, PyObject *args) static PyObject* ceph_unregister_client(BaseMgrModule *self, PyObject *args) { - char *addrs = nullptr; - if (!PyArg_ParseTuple(args, "s:ceph_unregister_client", &addrs)) { + const char* _name = nullptr; + char* addrs = nullptr; + if (!PyArg_ParseTuple(args, "zs:ceph_unregister_client", &_name, &addrs)) { return nullptr; } + auto name = _name ? std::string(_name) : std::string(self->this_module->get_name()); without_gil([&] { - self->py_modules->unregister_client(self->this_module->get_name(), addrs); + self->py_modules->unregister_client(name, addrs); }); Py_RETURN_NONE; } diff --git a/ceph/src/mgr/DaemonServer.cc b/ceph/src/mgr/DaemonServer.cc index 46c475394..0e9e6be2a 100644 --- a/ceph/src/mgr/DaemonServer.cc +++ b/ceph/src/mgr/DaemonServer.cc @@ -179,7 +179,7 @@ entity_addrvec_t DaemonServer::get_myaddrs() const return msgr->get_myaddrs(); } -int DaemonServer::ms_handle_authentication(Connection *con) +int DaemonServer::ms_handle_fast_authentication(Connection *con) { auto s = ceph::make_ref(cct); con->set_priv(s); @@ -214,16 +214,19 @@ int DaemonServer::ms_handle_authentication(Connection *con) dout(10) << " session " << s << " " << s->entity_name << " has caps " << s->caps << " '" << str << "'" << dendl; } + return 1; +} +void DaemonServer::ms_handle_accept(Connection* con) +{ if (con->get_peer_type() == CEPH_ENTITY_TYPE_OSD) { + auto s = ceph::ref_cast(con->get_priv()); std::lock_guard l(lock); s->osd_id = atoi(s->entity_name.get_id().c_str()); dout(10) << "registering osd." 
<< s->osd_id << " session " << s << " con " << con << dendl; osd_cons[s->osd_id].insert(con); } - - return 1; } bool DaemonServer::ms_handle_reset(Connection *con) diff --git a/ceph/src/mgr/DaemonServer.h b/ceph/src/mgr/DaemonServer.h index ff9835680..a7b645610 100644 --- a/ceph/src/mgr/DaemonServer.h +++ b/ceph/src/mgr/DaemonServer.h @@ -269,7 +269,8 @@ public: ~DaemonServer() override; bool ms_dispatch2(const ceph::ref_t& m) override; - int ms_handle_authentication(Connection *con) override; + int ms_handle_fast_authentication(Connection *con) override; + void ms_handle_accept(Connection *con) override; bool ms_handle_reset(Connection *con) override; void ms_handle_remote_reset(Connection *con) override {} bool ms_handle_refused(Connection *con) override; diff --git a/ceph/src/mgr/Mgr.cc b/ceph/src/mgr/Mgr.cc index 7dc158fe1..cb988cf76 100644 --- a/ceph/src/mgr/Mgr.cc +++ b/ceph/src/mgr/Mgr.cc @@ -388,7 +388,7 @@ void Mgr::init() entity_addrvec_t addrv; addrv.parse(ident); ident = (char*)realloc(ident, 0); - py_module_registry->register_client("libcephsqlite", addrv); + py_module_registry->register_client("libcephsqlite", addrv, true); } #endif diff --git a/ceph/src/mgr/MgrClient.cc b/ceph/src/mgr/MgrClient.cc index 6253d2670..6250ea3b9 100644 --- a/ceph/src/mgr/MgrClient.cc +++ b/ceph/src/mgr/MgrClient.cc @@ -14,6 +14,7 @@ #include "MgrClient.h" +#include "common/perf_counters_key.h" #include "mgr/MgrContext.h" #include "mon/MonMap.h" @@ -331,6 +332,12 @@ void MgrClient::_send_report() const PerfCounters::perf_counter_data_any_d &ctr, const PerfCounters &perf_counters) { + // FIXME: We don't send labeled perf counters to the mgr currently. + auto labels = ceph::perf_counters::key_labels(perf_counters.get_name()); + if (labels.begin() != labels.end()) { + return false; + } + return perf_counters.get_adjusted_priority(ctr.prio) >= (int)stats_threshold; }; @@ -367,20 +374,20 @@ void MgrClient::_send_report() } if (session->declared.count(path) == 0) { - ldout(cct,20) << " declare " << path << dendl; - PerfCounterType type; - type.path = path; - if (data.description) { - type.description = data.description; - } - if (data.nick) { - type.nick = data.nick; - } - type.type = data.type; - type.priority = perf_counters.get_adjusted_priority(data.prio); - type.unit = data.unit; - report->declare_types.push_back(std::move(type)); - session->declared.insert(path); + ldout(cct, 20) << " declare " << path << dendl; + PerfCounterType type; + type.path = path; + if (data.description) { + type.description = data.description; + } + if (data.nick) { + type.nick = data.nick; + } + type.type = data.type; + type.priority = perf_counters.get_adjusted_priority(data.prio); + type.unit = data.unit; + report->declare_types.push_back(std::move(type)); + session->declared.insert(path); } encode(static_cast(data.u64), report->packed); diff --git a/ceph/src/mgr/PyFormatter.cc b/ceph/src/mgr/PyFormatter.cc index 8e58f6e9a..6a7f3e982 100644 --- a/ceph/src/mgr/PyFormatter.cc +++ b/ceph/src/mgr/PyFormatter.cc @@ -37,6 +37,11 @@ void PyFormatter::open_object_section(std::string_view name) cursor = dict; } +void PyFormatter::dump_null(std::string_view name) +{ + dump_pyobject(name, Py_None); +} + void PyFormatter::dump_unsigned(std::string_view name, uint64_t u) { PyObject *p = PyLong_FromUnsignedLong(u); diff --git a/ceph/src/mgr/PyFormatter.h b/ceph/src/mgr/PyFormatter.h index 5e4c0a679..b45fbf162 100644 --- a/ceph/src/mgr/PyFormatter.h +++ b/ceph/src/mgr/PyFormatter.h @@ -87,6 +87,7 @@ public: stack.pop(); } void 
dump_bool(std::string_view name, bool b) override; + void dump_null(std::string_view name) override; void dump_unsigned(std::string_view name, uint64_t u) override; void dump_int(std::string_view name, int64_t u) override; void dump_float(std::string_view name, double d) override; diff --git a/ceph/src/mgr/PyModuleRegistry.h b/ceph/src/mgr/PyModuleRegistry.h index e16b2830d..9af9abb57 100644 --- a/ceph/src/mgr/PyModuleRegistry.h +++ b/ceph/src/mgr/PyModuleRegistry.h @@ -202,10 +202,14 @@ public: return active_modules->get_services(); } - void register_client(std::string_view name, entity_addrvec_t addrs) + void register_client(std::string_view name, entity_addrvec_t addrs, bool replace) { std::lock_guard l(lock); - clients.emplace(std::string(name), std::move(addrs)); + auto n = std::string(name); + if (replace) { + clients.erase(n); + } + clients.emplace(n, std::move(addrs)); } void unregister_client(std::string_view name, const entity_addrvec_t& addrs) { diff --git a/ceph/src/mon/AuthMonitor.cc b/ceph/src/mon/AuthMonitor.cc index 840ca38fc..395ff4926 100644 --- a/ceph/src/mon/AuthMonitor.cc +++ b/ceph/src/mon/AuthMonitor.cc @@ -827,7 +827,7 @@ bool AuthMonitor::prep_auth(MonOpRequestRef op, bool paxos_writable) } if (ret > 0) { if (!s->authenticated && - mon.ms_handle_authentication(s->con.get()) > 0) { + mon.ms_handle_fast_authentication(s->con.get()) > 0) { finished = true; } ret = 0; @@ -1355,7 +1355,7 @@ bool AuthMonitor::valid_caps( } } else if (type == "mds") { MDSAuthCaps mdscap; - if (!mdscap.parse(g_ceph_context, caps, out)) { + if (!mdscap.parse(caps, out)) { return false; } } else { diff --git a/ceph/src/mon/ConfigMonitor.cc b/ceph/src/mon/ConfigMonitor.cc index 471aebf6d..e24ccbc18 100644 --- a/ceph/src/mon/ConfigMonitor.cc +++ b/ceph/src/mon/ConfigMonitor.cc @@ -920,6 +920,7 @@ bool ConfigMonitor::refresh_config(MonSession *s) string device_class; if (s->name.is_osd()) { + osdmap.crush->get_full_location(s->entity_name.to_str(), &crush_location); const char *c = osdmap.crush->get_item_class(s->name.num()); if (c) { device_class = c; diff --git a/ceph/src/mon/ElectionLogic.cc b/ceph/src/mon/ElectionLogic.cc index e22a85bed..0c1b30c41 100644 --- a/ceph/src/mon/ElectionLogic.cc +++ b/ceph/src/mon/ElectionLogic.cc @@ -398,7 +398,8 @@ void ElectionLogic::propose_connectivity_handler(int from, epoch_t mepoch, ldout(cct, 10) << "propose from rank=" << from << ",from_score=" << from_score << "; my score=" << my_score << "; currently acked " << leader_acked - << ",leader_score=" << leader_score << dendl; + << ",leader_score=" << leader_score + << ",disallowed_leaders=" << elector->get_disallowed_leaders() << dendl; bool my_win = (my_score >= 0) && // My score is non-zero; I am allowed to lead ((my_rank < from && my_score >= from_score) || // We have same scores and I have lower rank, or diff --git a/ceph/src/mon/FSCommands.cc b/ceph/src/mon/FSCommands.cc index b7cf506fc..65d2c356b 100644 --- a/ceph/src/mon/FSCommands.cc +++ b/ceph/src/mon/FSCommands.cc @@ -1660,6 +1660,14 @@ int FileSystemCommandHandler::_check_pool( return -EINVAL; } + if (type != POOL_METADATA && pool->pg_autoscale_mode == pg_pool_t::pg_autoscale_mode_t::ON && !pool->has_flag(pg_pool_t::FLAG_BULK)) { + // TODO: consider issuing an info event in this case + *ss << " Pool '" << pool_name << "' (id '" << pool_id + << "') has pg autoscale mode 'on' but is not marked as bulk." 
<< std::endl + << " Consider setting the flag by running" << std::endl + << " # ceph osd pool set " << pool_name << " bulk true" << std::endl; + } + // Nothing special about this pool, so it is permissible return 0; } diff --git a/ceph/src/mon/MDSMonitor.cc b/ceph/src/mon/MDSMonitor.cc index 091206a68..4b27d828c 100644 --- a/ceph/src/mon/MDSMonitor.cc +++ b/ceph/src/mon/MDSMonitor.cc @@ -804,6 +804,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) last_beacon.erase(followergid); } request_proposal(mon.osdmon()); + force_immediate_propose(); pending.damaged(rankgid, blocklist_epoch); last_beacon.erase(rankgid); @@ -1277,6 +1278,8 @@ bool MDSMonitor::fail_mds_gid(FSMap &fsmap, mds_gid_t gid) utime_t until = ceph_clock_now(); until += g_conf().get_val("mon_mds_blocklist_interval"); blocklist_epoch = mon.osdmon()->blocklist(info.addrs, until); + /* do not delay when we are evicting an MDS */ + force_immediate_propose(); } fsmap.erase(gid, blocklist_epoch); @@ -1438,8 +1441,7 @@ bool MDSMonitor::prepare_command(MonOpRequestRef op) out: dout(4) << __func__ << " done, r=" << r << dendl; /* Compose response */ - string rs; - getline(ss, rs); + string rs = ss.str(); if (r >= 0) { // success.. delay reply diff --git a/ceph/src/mon/MonClient.cc b/ceph/src/mon/MonClient.cc index 45550a5ca..ab3d11697 100644 --- a/ceph/src/mon/MonClient.cc +++ b/ceph/src/mon/MonClient.cc @@ -154,11 +154,8 @@ int MonClient::get_monmap_and_config() if (r < 0) { return r; } - r = authenticate(std::chrono::duration(cct->_conf.get_val("client_mount_timeout")).count()); - if (r == -ETIMEDOUT) { - shutdown(); - continue; - } + r = authenticate( + cct->_conf.get_val("client_mount_timeout").count()); if (r < 0) { break; } @@ -1605,7 +1602,7 @@ int MonClient::handle_auth_request( // for some channels prior to nautilus (osd heartbeat), we // tolerate the lack of an authorizer. 
if (!con->get_messenger()->require_authorizer) { - handle_authentication_dispatcher->ms_handle_authentication(con); + handle_authentication_dispatcher->ms_handle_fast_authentication(con); return 1; } return -EACCES; @@ -1643,7 +1640,7 @@ int MonClient::handle_auth_request( &auth_meta->connection_secret, ac); if (isvalid) { - handle_authentication_dispatcher->ms_handle_authentication(con); + handle_authentication_dispatcher->ms_handle_fast_authentication(con); return 1; } if (!more && !was_challenge && auth_meta->authorizer_challenge) { diff --git a/ceph/src/mon/MonCommands.h b/ceph/src/mon/MonCommands.h index f9dd7e39a..52af09c8c 100644 --- a/ceph/src/mon/MonCommands.h +++ b/ceph/src/mon/MonCommands.h @@ -843,13 +843,13 @@ COMMAND("osd erasure-code-profile ls", COMMAND("osd set " "name=key,type=CephChoices,strings=full|pause|noup|nodown|" "noout|noin|nobackfill|norebalance|norecover|noscrub|nodeep-scrub|" - "notieragent|nosnaptrim|pglog_hardlimit " + "notieragent|nosnaptrim|pglog_hardlimit|noautoscale " "name=yes_i_really_mean_it,type=CephBool,req=false", "set ", "osd", "rw") COMMAND("osd unset " "name=key,type=CephChoices,strings=full|pause|noup|nodown|"\ "noout|noin|nobackfill|norebalance|norecover|noscrub|nodeep-scrub|" - "notieragent|nosnaptrim", + "notieragent|nosnaptrim|noautoscale", "unset ", "osd", "rw") COMMAND("osd require-osd-release "\ "name=release,type=CephChoices,strings=octopus|pacific|quincy|reef " diff --git a/ceph/src/mon/MonMap.cc b/ceph/src/mon/MonMap.cc index 33b9aa8fa..bb8a4b194 100644 --- a/ceph/src/mon/MonMap.cc +++ b/ceph/src/mon/MonMap.cc @@ -369,6 +369,7 @@ void MonMap::print_summary(ostream& out) const has_printed = true; } out << "}" << " removed_ranks: {" << removed_ranks << "}"; + out << " disallowed_leaders: {" << disallowed_leaders << "}"; } void MonMap::print(ostream& out) const diff --git a/ceph/src/mon/MonOpRequest.h b/ceph/src/mon/MonOpRequest.h index 73275e81e..0c4379910 100644 --- a/ceph/src/mon/MonOpRequest.h +++ b/ceph/src/mon/MonOpRequest.h @@ -131,7 +131,7 @@ private: } protected: - void _dump_op_descriptor_unlocked(std::ostream& stream) const override { + void _dump_op_descriptor(std::ostream& stream) const override { get_req()->print(stream); } diff --git a/ceph/src/mon/Monitor.cc b/ceph/src/mon/Monitor.cc index 5635e5ebc..002a02fe0 100644 --- a/ceph/src/mon/Monitor.cc +++ b/ceph/src/mon/Monitor.cc @@ -2005,6 +2005,7 @@ void Monitor::handle_probe_reply(MonOpRequestRef op) dout(10) << " got newer/committed monmap epoch " << newmap->get_epoch() << ", mine was " << monmap->get_epoch() << dendl; int epoch_diff = newmap->get_epoch() - monmap->get_epoch(); + dout(20) << " new monmap is " << *newmap << dendl; delete newmap; monmap->decode(m->monmap_bl); dout(20) << "has_ever_joined: " << has_ever_joined << dendl; @@ -6380,7 +6381,7 @@ int Monitor::handle_auth_request( &auth_meta->connection_secret, &auth_meta->authorizer_challenge); if (isvalid) { - ms_handle_authentication(con); + ms_handle_fast_authentication(con); return 1; } if (!more && !was_challenge && auth_meta->authorizer_challenge) { @@ -6501,7 +6502,7 @@ int Monitor::handle_auth_request( } if (r > 0 && !s->authenticated) { - ms_handle_authentication(con); + ms_handle_fast_authentication(con); } dout(30) << " r " << r << " reply:\n"; @@ -6539,7 +6540,7 @@ void Monitor::ms_handle_accept(Connection *con) } } -int Monitor::ms_handle_authentication(Connection *con) +int Monitor::ms_handle_fast_authentication(Connection *con) { if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) { // mon <-> mon 
connections need no Session, and setting one up @@ -6656,14 +6657,16 @@ void Monitor::notify_new_monmap(bool can_change_external_state, bool remove_rank void Monitor::set_elector_disallowed_leaders(bool allow_election) { set dl; + // inherit dl from monmap for (auto name : monmap->disallowed_leaders) { dl.insert(monmap->get_rank(name)); - } - if (is_stretch_mode()) { - for (auto name : monmap->stretch_marked_down_mons) { - dl.insert(monmap->get_rank(name)); - } - dl.insert(monmap->get_rank(monmap->tiebreaker_mon)); + } // unconditionally add stretch_marked_down_mons to the new dl copy + for (auto name : monmap->stretch_marked_down_mons) { + dl.insert(monmap->get_rank(name)); + } // add the tiebreaker_mon in case it is not in monmap->disallowed_leaders + if (!monmap->tiebreaker_mon.empty() && + monmap->contains(monmap->tiebreaker_mon)) { + dl.insert(monmap->get_rank(monmap->tiebreaker_mon)); } bool disallowed_changed = elector.set_disallowed_leaders(dl); diff --git a/ceph/src/mon/Monitor.h b/ceph/src/mon/Monitor.h index 998fe91eb..7f9a16a9a 100644 --- a/ceph/src/mon/Monitor.h +++ b/ceph/src/mon/Monitor.h @@ -957,7 +957,7 @@ public: MonCap mon_caps; bool get_authorizer(int dest_type, AuthAuthorizer **authorizer); public: // for AuthMonitor msgr1: - int ms_handle_authentication(Connection *con) override; + int ms_handle_fast_authentication(Connection *con) override; private: void ms_handle_accept(Connection *con) override; bool ms_handle_reset(Connection *con) override; diff --git a/ceph/src/mon/OSDMonitor.cc b/ceph/src/mon/OSDMonitor.cc index 3acafbb82..360bd036b 100644 --- a/ceph/src/mon/OSDMonitor.cc +++ b/ceph/src/mon/OSDMonitor.cc @@ -2769,7 +2769,7 @@ bool OSDMonitor::preprocess_query(MonOpRequestRef op) default: ceph_abort(); - return true; + return false; } } @@ -2808,7 +2808,7 @@ bool OSDMonitor::prepare_update(MonOpRequestRef op) } catch (const bad_cmd_get& e) { bufferlist bl; mon.reply_command(op, -EINVAL, e.what(), bl, get_last_committed()); - return true; + return false; /* nothing to propose */ } case CEPH_MSG_POOLOP: @@ -4057,7 +4057,7 @@ bool OSDMonitor::prepare_pg_ready_to_merge(MonOpRequestRef op) << " race with concurrent pg_num[_pending] update, will retry" << dendl; wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + return false; /* nothing to propose, yet */ } if (m->ready) { @@ -4438,7 +4438,7 @@ bool OSDMonitor::prepare_beacon(MonOpRequestRef op) send_latest(op, beacon->version+1); } dout(1) << " ignoring beacon from non-active osd."
<< from << dendl; - return false; + return false; /* nothing to propose */ } last_osd_report[from].first = ceph_clock_now(); @@ -4461,7 +4461,7 @@ bool OSDMonitor::prepare_beacon(MonOpRequestRef op) beacon->last_purged_snaps_scrub; return true; } else { - return false; + return false; /* nothing to propose */ } } @@ -9776,14 +9776,14 @@ bool OSDMonitor::prepare_command(MonOpRequestRef op) if (!cmdmap_from_json(m->cmd, &cmdmap, ss)) { string rs = ss.str(); mon.reply_command(op, -EINVAL, rs, get_last_committed()); - return true; + return false; /* nothing to propose */ } MonSession *session = op->get_session(); if (!session) { derr << __func__ << " no session" << dendl; mon.reply_command(op, -EACCES, "access denied", get_last_committed()); - return true; + return false; /* nothing to propose */ } return prepare_command_impl(op, cmdmap); @@ -9991,7 +9991,6 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, { op->mark_osdmon_event(__func__); auto m = op->get_req(); - bool ret = false; stringstream ss; string rs; bufferlist rdata; @@ -10051,8 +10050,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, (prefix == "osd crush set" && !osdid_present)) { if (pending_inc.crush.length()) { dout(10) << __func__ << " waiting for pending crush update " << dendl; - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } dout(10) << "prepare_command setting new crush map" << dendl; bufferlist data(m->get_data()); @@ -10064,7 +10062,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, catch (const std::exception &e) { err = -EINVAL; ss << "Failed to parse crushmap: " << e.what(); - goto reply; + goto reply_no_propose; } int64_t prior_version = 0; @@ -10082,25 +10080,25 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << dendl; err = 0; ss << osdmap.get_crush_version(); - goto reply; + goto reply_no_propose; } } if (prior_version != osdmap.get_crush_version()) { err = -EPERM; ss << "prior_version " << prior_version << " != crush version " << osdmap.get_crush_version(); - goto reply; + goto reply_no_propose; } } if (!validate_crush_against_features(&crush, ss)) { err = -EINVAL; - goto reply; + goto reply_no_propose; } err = osdmap.validate_crush_rules(&crush, &ss); if (err < 0) { - goto reply; + goto reply_no_propose; } if (g_conf()->mon_osd_crush_smoke_test) { @@ -10120,7 +10118,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << ": " << ess.str() << dendl; ss << "crush smoke test failed with " << r << ": " << ess.str(); err = r; - goto reply; + goto reply_no_propose; } dout(10) << __func__ << " crush somke test duration: " << duration << ", result: " << ess.str() << dendl; @@ -10142,7 +10140,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } if (!validate_crush_against_features(&newcrush, ss)) { err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, mon.get_quorum_con_features()); @@ -10153,7 +10151,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, string device_class; if (!cmd_getval(cmdmap, "class", device_class)) { err = -EINVAL; // no value! 
- goto reply; + goto reply_no_propose; } bool stop = false; @@ -10206,7 +10204,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << dendl; err = newcrush.update_device_class(osd, device_class, name, &ss); if (err < 0) { - goto reply; + goto reply_no_propose; } if (err == 0 && !_have_pending_crush()) { if (!stop) { @@ -10250,7 +10248,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, // ss has reason for failure ss << ", unable to parse osd id:\"" << idvec[j] << "\". "; err = -EINVAL; - goto reply; + goto reply_no_propose; } osds.insert(osd); } @@ -10273,7 +10271,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = newcrush.remove_device_class(cct, osd, &ss); if (err < 0) { // ss has reason for failure - goto reply; + goto reply_no_propose; } updated.insert(osd); } @@ -10291,18 +10289,18 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, string device_class; if (!cmd_getval(cmdmap, "class", device_class)) { err = -EINVAL; // no value! - goto reply; + goto reply_no_propose; } if (osdmap.require_osd_release < ceph_release_t::luminous) { ss << "you must complete the upgrade and 'ceph osd require-osd-release " << "luminous' before using crush device classes"; err = -EPERM; - goto reply; + goto reply_no_propose; } if (!_have_pending_crush() && _get_stable_crush().class_exists(device_class)) { ss << "class '" << device_class << "' already exists"; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); if (newcrush.class_exists(device_class)) { @@ -10319,18 +10317,18 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, string device_class; if (!cmd_getval(cmdmap, "class", device_class)) { err = -EINVAL; // no value! - goto reply; + goto reply_no_propose; } if (osdmap.require_osd_release < ceph_release_t::luminous) { ss << "you must complete the upgrade and 'ceph osd require-osd-release " << "luminous' before using crush device classes"; err = -EPERM; - goto reply; + goto reply_no_propose; } if (!osdmap.crush->class_exists(device_class)) { err = 0; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -10343,7 +10341,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (newcrush.class_is_in_use(class_id, &ts)) { err = -EBUSY; ss << "class '" << device_class << "' " << ts.str(); - goto reply; + goto reply_no_propose; } // check if class is used by any erasure-code-profiles @@ -10368,7 +10366,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = -EBUSY; ss << "class '" << device_class << "' is still referenced by erasure-code-profile(s): " << referenced_by; - goto reply; + goto reply_no_propose; } set osds; @@ -10377,7 +10375,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = newcrush.remove_device_class(cct, p, &ss); if (err < 0) { // ss has reason for failure - goto reply; + goto reply_no_propose; } } @@ -10387,7 +10385,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err < 0) { ss << "class '" << device_class << "' cannot be removed '" << cpp_strerror(err) << "'"; - goto reply; + goto reply_no_propose; } } @@ -10400,11 +10398,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, string srcname, dstname; if (!cmd_getval(cmdmap, "srcname", srcname)) { err = -EINVAL; - goto reply; + goto reply_no_propose; } if (!cmd_getval(cmdmap, "dstname", dstname)) { err = -EINVAL; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -10413,14 +10411,14 @@ bool 
OSDMonitor::prepare_command_impl(MonOpRequestRef op, // so command is idempotent ss << "already renamed to '" << dstname << "'"; err = 0; - goto reply; + goto reply_no_propose; } err = newcrush.rename_class(srcname, dstname); if (err < 0) { ss << "fail to rename '" << srcname << "' to '" << dstname << "' : " << cpp_strerror(err); - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -10444,7 +10442,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!_have_pending_crush() && _get_stable_crush().name_exists(name)) { ss << "bucket '" << name << "' already exists"; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -10457,12 +10455,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (type < 0) { ss << "type '" << typestr << "' does not exist"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (type == 0) { ss << "type '" << typestr << "' is for devices, not buckets"; err = -EINVAL; - goto reply; + goto reply_no_propose; } int bucketno; err = newcrush.add_bucket(0, 0, @@ -10470,12 +10468,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, NULL, &bucketno); if (err < 0) { ss << "add_bucket error: '" << cpp_strerror(err) << "'"; - goto reply; + goto reply_no_propose; } err = newcrush.set_item_name(bucketno, name); if (err < 0) { ss << "error setting bucket name to '" << name << "'"; - goto reply; + goto reply_no_propose; } if (!loc.empty()) { @@ -10484,7 +10482,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = newcrush.move_bucket(cct, bucketno, loc); if (err < 0) { ss << "error moving bucket '" << name << "' to location " << loc; - goto reply; + goto reply_no_propose; } } else { ss << "no need to move item id " << bucketno << " name '" << name @@ -10508,12 +10506,15 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, cmd_getval(cmdmap, "dstname", dstname); err = crush_rename_bucket(srcname, dstname, &ss); - if (err == -EALREADY) // equivalent to success for idempotency - err = 0; - if (err) - goto reply; - else + if (err) { + // equivalent to success for idempotency + if (err == -EALREADY) { + err = 0; + } + goto reply_no_propose; + } else { goto update; + } } else if (prefix == "osd crush weight-set create" || prefix == "osd crush weight-set create-compat") { if (_have_pending_crush()) { @@ -10526,7 +10527,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (newcrush.has_non_straw2_buckets()) { ss << "crush map contains one or more bucket(s) that are not straw2"; err = -EPERM; - goto reply; + goto reply_no_propose; } if (prefix == "osd crush weight-set create") { if (osdmap.require_min_compat_client != ceph_release_t::unknown && @@ -10537,7 +10538,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "Try 'ceph osd set-require-min-compat-client luminous' " << "before using the new interface"; err = -EPERM; - goto reply; + goto reply_no_propose; } string poolname, mode; cmd_getval(cmdmap, "pool", poolname); @@ -10545,13 +10546,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool < 0) { ss << "pool '" << poolname << "' not found"; err = -ENOENT; - goto reply; + goto reply_no_propose; } cmd_getval(cmdmap, "mode", mode); if (mode != "flat" && mode != "positional") { ss << "unrecognized weight-set mode '" << mode << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } positions = mode == "flat" ? 
1 : osdmap.get_pg_pool(pool)->get_size(); } else { @@ -10565,7 +10566,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "weight-set for pool '" << osdmap.get_pool_name(pool) << "' already created"; } - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, mon.get_quorum_con_features()); @@ -10582,7 +10583,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool < 0) { ss << "pool '" << poolname << "' not found"; err = -ENOENT; - goto reply; + goto reply_no_propose; } } else { pool = CrushWrapper::DEFAULT_CHOOSE_ARGS; @@ -10606,32 +10607,32 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool < 0) { ss << "pool '" << poolname << "' not found"; err = -ENOENT; - goto reply; + goto reply_no_propose; } if (!newcrush.have_choose_args(pool)) { ss << "no weight-set for pool '" << poolname << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } auto arg_map = newcrush.choose_args_get(pool); int positions = newcrush.get_choose_args_positions(arg_map); if (weight.size() != (size_t)positions) { ss << "must specify exact " << positions << " weight values"; err = -EINVAL; - goto reply; + goto reply_no_propose; } } else { pool = CrushWrapper::DEFAULT_CHOOSE_ARGS; if (!newcrush.have_choose_args(pool)) { ss << "no backward-compatible weight-set"; err = -ENOENT; - goto reply; + goto reply_no_propose; } } if (!newcrush.name_exists(item)) { ss << "item '" << item << "' does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } err = newcrush.choose_args_adjust_item_weightf( cct, @@ -10640,7 +10641,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, weight, &ss); if (err < 0) { - goto reply; + goto reply_no_propose; } err = 0; pending_inc.crush.clear(); @@ -10656,7 +10657,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = -ENOENT; ss << osd_name << " does not exist. Create it before updating the crush map"; - goto reply; + goto reply_no_propose; } double weight; @@ -10664,7 +10665,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse weight value '" << cmd_vartype_stringify(cmdmap.at("weight")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } string args; @@ -10679,7 +10680,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to set item id " << osdid << " name '" << osd_name << "' weight " << weight << " at location " << loc << ": does not exist"; - goto reply; + goto reply_no_propose; } dout(5) << "adding/updating crush item id " << osdid << " name '" @@ -10700,12 +10701,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } if (err < 0) - goto reply; + goto reply_no_propose; if (err == 0 && !_have_pending_crush()) { ss << action << " item id " << osdid << " name '" << osd_name << "' weight " << weight << " at location " << loc << ": no change"; - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -10724,7 +10725,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = -ENOENT; ss << osd_name << " does not exist. 
create it before updating the crush map"; - goto reply; + goto reply_no_propose; } double weight; @@ -10732,7 +10733,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse weight value '" << cmd_vartype_stringify(cmdmap.at("weight")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } string args; @@ -10822,12 +10823,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!newcrush.name_exists(source)) { ss << "source item " << source << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } if (!newcrush.name_exists(dest)) { ss << "dest item " << dest << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } int sid = newcrush.get_item_id(source); int did = newcrush.get_item_id(dest); @@ -10835,7 +10836,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (newcrush.get_immediate_parent_id(sid, &sparent) == 0 && !force) { ss << "source item " << source << " is not an orphan bucket; pass --yes-i-really-mean-it to proceed anyway"; err = -EPERM; - goto reply; + goto reply_no_propose; } if (newcrush.get_bucket_alg(sid) != newcrush.get_bucket_alg(did) && !force) { @@ -10843,13 +10844,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "dest bucket alg " << crush_alg_name(newcrush.get_bucket_alg(did)) << "; pass --yes-i-really-mean-it to proceed anyway"; err = -EPERM; - goto reply; + goto reply_no_propose; } int r = newcrush.swap_bucket(cct, sid, did); if (r < 0) { ss << "failed to swap bucket contents: " << cpp_strerror(r); err = r; - goto reply; + goto reply_no_propose; } ss << "swapped bucket of " << source << " to " << dest; pending_inc.crush.clear(); @@ -10873,7 +10874,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!osdmap.crush->name_exists(name)) { err = -ENOENT; ss << "item " << name << " does not exist"; - goto reply; + goto reply_no_propose; } else { dout(5) << "resolved crush name '" << name << "' to id " << id << dendl; } @@ -10881,7 +10882,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "no need to move item id " << id << " name '" << name << "' to location " << loc << " in crush map"; err = 0; - goto reply; + goto reply_no_propose; } dout(5) << "linking crush item name '" << name << "' at location " << loc << dendl; @@ -10890,7 +10891,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!newcrush.name_exists(name)) { err = -ENOENT; ss << "item " << name << " does not exist"; - goto reply; + goto reply_no_propose; } else { int id = newcrush.get_item_id(name); if (!newcrush.check_item_loc(cct, id, loc, (int *)NULL)) { @@ -10903,7 +10904,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "cannot link item id " << id << " name '" << name << "' to location " << loc; - goto reply; + goto reply_no_propose; } } else { ss << "no need to move item id " << id << " name '" << name @@ -10993,27 +10994,27 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!newcrush.name_exists(name)) { err = -ENOENT; ss << "device '" << name << "' does not appear in the crush map"; - goto reply; + goto reply_no_propose; } int id = newcrush.get_item_id(name); if (id < 0) { ss << "device '" << name << "' is not a leaf in the crush map"; err = -EINVAL; - goto reply; + goto reply_no_propose; } double w; if (!cmd_getval(cmdmap, "weight", w)) { ss << "unable to parse weight value '" << cmd_vartype_stringify(cmdmap.at("weight")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } err = 
newcrush.adjust_item_weightf(cct, id, w, g_conf()->osd_crush_update_weight_set); if (err < 0) - goto reply; + goto reply_no_propose; pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, mon.get_quorum_con_features()); ss << "reweighted item id " << id << " name '" << name << "' to " << w @@ -11031,27 +11032,27 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!newcrush.name_exists(name)) { err = -ENOENT; ss << "device '" << name << "' does not appear in the crush map"; - goto reply; + goto reply_no_propose; } int id = newcrush.get_item_id(name); if (id >= 0) { ss << "device '" << name << "' is not a subtree in the crush map"; err = -EINVAL; - goto reply; + goto reply_no_propose; } double w; if (!cmd_getval(cmdmap, "weight", w)) { ss << "unable to parse weight value '" << cmd_vartype_stringify(cmdmap.at("weight")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } err = newcrush.adjust_subtree_weightf(cct, id, w, g_conf()->osd_crush_update_weight_set); if (err < 0) - goto reply; + goto reply_no_propose; pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, mon.get_quorum_con_features()); ss << "reweighted subtree id " << id << " name '" << name << "' to " << w @@ -11083,12 +11084,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "unrecognized profile '" << profile << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (!validate_crush_against_features(&newcrush, ss)) { err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -11110,25 +11111,25 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = -EINVAL; ss << "failed to parse integer value " << cmd_vartype_stringify(cmdmap.at("value")); - goto reply; + goto reply_no_propose; } if (tunable == "straw_calc_version") { if (value != 0 && value != 1) { ss << "value must be 0 or 1; got " << value; err = -EINVAL; - goto reply; + goto reply_no_propose; } newcrush.set_straw_calc_version(value); } else { ss << "unrecognized tunable '" << tunable << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (!validate_crush_against_features(&newcrush, ss)) { err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -11153,7 +11154,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, // From the user point of view, the rule is more meaningfull. ss << "rule " << name << " already exists"; err = 0; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -11168,7 +11169,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, pg_pool_t::TYPE_REPLICATED, &ss); if (ruleno < 0) { err = ruleno; - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -11191,7 +11192,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, // From the user point of view, the rule is more meaningfull. 
ss << "rule " << name << " already exists"; err = 0; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -11207,7 +11208,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, "firstn", pg_pool_t::TYPE_REPLICATED, &ss); if (ruleno < 0) { err = ruleno; - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -11227,7 +11228,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (erasure_code_profile_in_use(osdmap.pools, name, &ss)) { err = -EBUSY; - goto reply; + goto reply_no_propose; } if (osdmap.has_erasure_code_profile(name) || @@ -11246,7 +11247,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "erasure-code-profile " << name << " does not exist"; err = 0; - goto reply; + goto reply_no_propose; } } else if (prefix == "osd erasure-code-profile set") { @@ -11261,7 +11262,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, map profile_map; err = parse_erasure_code_profile(profile, &profile_map, &ss); if (err) - goto reply; + goto reply_no_propose; if (auto found = profile_map.find("crush-failure-domain"); found != profile_map.end()) { const auto& failure_domain = found->second; @@ -11270,7 +11271,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "erasure-code-profile " << profile_map << " contains an invalid failure-domain " << std::quoted(failure_domain); err = -EINVAL; - goto reply; + goto reply_no_propose; } } @@ -11278,7 +11279,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "erasure-code-profile " << profile_map << " must contain a plugin entry" << std::endl; err = -EINVAL; - goto reply; + goto reply_no_propose; } string plugin = profile_map["plugin"]; @@ -11288,18 +11289,18 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { err = normalize_profile(name, profile_map, force, &ss); if (err) - goto reply; + goto reply_no_propose; if (osdmap.has_erasure_code_profile(name)) { ErasureCodeProfile existing_profile_map = osdmap.get_erasure_code_profile(name); err = normalize_profile(name, existing_profile_map, force, &ss); if (err) - goto reply; + goto reply_no_propose; if (existing_profile_map == profile_map) { err = 0; - goto reply; + goto reply_no_propose; } if (!force) { err = -EPERM; @@ -11308,7 +11309,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << existing_profile_map << " is different from the proposed profile " << profile_map; - goto reply; + goto reply_no_propose; } } @@ -11327,7 +11328,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err) - goto reply; + goto reply_no_propose; string name, poolstr; cmd_getval(cmdmap, "name", name); string profile; @@ -11346,10 +11347,10 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, profile_map, &ss); if (err) - goto reply; + goto reply_no_propose; err = normalize_profile(name, profile_map, true, &ss); if (err) - goto reply; + goto reply_no_propose; dout(20) << "erasure code profile set " << profile << "=" << profile_map << dendl; pending_inc.set_erasure_code_profile(profile, profile_map); @@ -11364,15 +11365,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, case -EEXIST: // return immediately ss << "rule " << name << " already exists"; err = 0; - goto reply; - break; + goto reply_no_propose; case -EALREADY: // wait for pending to be proposed ss << "rule " << name << " already exists"; err = 0; break; default: // non recoverable error - goto reply; - break; + goto reply_no_propose; } } 
else { ss << "created rule " << name << " at " << rule; @@ -11390,7 +11389,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!osdmap.crush->rule_exists(name)) { ss << "rule " << name << " does not exist"; err = 0; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -11408,12 +11407,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (osdmap.crush_rule_in_use(ruleno)) { ss << "crush rule " << name << " (" << ruleno << ") is in use"; err = -EBUSY; - goto reply; + goto reply_no_propose; } err = newcrush.remove_rule(ruleno); if (err < 0) { - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); @@ -11432,12 +11431,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (srcname.empty() || dstname.empty()) { ss << "must specify both source rule name and destination rule name"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (srcname == dstname) { ss << "destination rule name is equal to source rule name"; err = 0; - goto reply; + goto reply_no_propose; } CrushWrapper newcrush = _get_pending_crush(); @@ -11447,13 +11446,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, // (so this command is idempotent) ss << "already renamed to '" << dstname << "'"; err = 0; - goto reply; + goto reply_no_propose; } err = newcrush.rename_rule(srcname, dstname, &ss); if (err < 0) { // ss has reason for failure - goto reply; + goto reply_no_propose; } pending_inc.crush.clear(); newcrush.encode(pending_inc.crush, mon.get_quorum_con_features()); @@ -11468,14 +11467,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse 'newmax' value '" << cmd_vartype_stringify(cmdmap.at("newmax")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (newmax > g_conf()->mon_max_osd) { err = -ERANGE; ss << "cannot set max_osd to " << newmax << " which is > conf.mon_max_osd (" << g_conf()->mon_max_osd << ")"; - goto reply; + goto reply_no_propose; } // Don't allow shrinking OSD number as this will cause data loss @@ -11490,7 +11489,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = -EBUSY; ss << "cannot shrink max_osd to " << newmax << " because osd." 
<< i << " (and possibly others) still in use"; - goto reply; + goto reply_no_propose; } } } @@ -11510,7 +11509,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse 'ratio' value '" << cmd_vartype_stringify(cmdmap.at("ratio")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (prefix == "osd set-full-ratio") pending_inc.new_full_ratio = n; @@ -11530,7 +11529,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!vno) { ss << "version " << v << " is not recognized"; err = -EINVAL; - goto reply; + goto reply_no_propose; } OSDMap newmap; newmap.deepish_copy_from(osdmap); @@ -11541,7 +11540,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "osdmap current utilizes features that require " << mvno << "; cannot set require_min_compat_client below that to " << vno; err = -EPERM; - goto reply; + goto reply_no_propose; } bool sure = false; cmd_getval(cmdmap, "yes_i_really_mean_it", sure); @@ -11579,7 +11578,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!ok) { ss << "; add --yes-i-really-mean-it to do it anyway"; err = -EPERM; - goto reply; + goto reply_no_propose; } } ss << "set require_min_compat_client to " << vno; @@ -11629,7 +11628,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "Not advisable to continue since no OSDs are up. Pass " << "--yes-i-really-mean-it if you really wish to continue."; err = -EPERM; - goto reply; + goto reply_no_propose; } // The release check here is required because for OSD_PGLOG_HARDLIMIT, // we are reusing a jewel feature bit that was retired in luminous. @@ -11640,8 +11639,10 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "not all up OSDs have OSD_PGLOG_HARDLIMIT feature"; err = -EPERM; - goto reply; + goto reply_no_propose; } + } else if (key == "noautoscale") { + return prepare_set_flag(op, CEPH_OSDMAP_NOAUTOSCALE); } else { ss << "unrecognized flag '" << key << "'"; err = -EINVAL; @@ -11674,6 +11675,8 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, return prepare_unset_flag(op, CEPH_OSDMAP_NOTIERAGENT); else if (key == "nosnaptrim") return prepare_unset_flag(op, CEPH_OSDMAP_NOSNAPTRIM); + else if (key == "noautoscale") + return prepare_unset_flag(op, CEPH_OSDMAP_NOAUTOSCALE); else { ss << "unrecognized flag '" << key << "'"; err = -EINVAL; @@ -11688,74 +11691,74 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!rel) { ss << "unrecognized release " << release; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (rel == osdmap.require_osd_release) { // idempotent err = 0; - goto reply; + goto reply_no_propose; } if (osdmap.require_osd_release < ceph_release_t::pacific && !sure) { ss << "Not advisable to continue since current 'require_osd_release' " << "refers to a very old Ceph release. Pass " << "--yes-i-really-mean-it if you really wish to continue."; err = -EPERM; - goto reply; + goto reply_no_propose; } if (!osdmap.get_num_up_osds() && !sure) { ss << "Not advisable to continue since no OSDs are up. 
Pass " << "--yes-i-really-mean-it if you really wish to continue."; err = -EPERM; - goto reply; + goto reply_no_propose; } if (rel == ceph_release_t::pacific) { if (!mon.monmap->get_required_features().contains_all( ceph::features::mon::FEATURE_PACIFIC)) { ss << "not all mons are pacific"; err = -EPERM; - goto reply; + goto reply_no_propose; } if ((!HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_PACIFIC)) && !sure) { ss << "not all up OSDs have CEPH_FEATURE_SERVER_PACIFIC feature"; err = -EPERM; - goto reply; + goto reply_no_propose; } } else if (rel == ceph_release_t::quincy) { if (!mon.monmap->get_required_features().contains_all( ceph::features::mon::FEATURE_QUINCY)) { ss << "not all mons are quincy"; err = -EPERM; - goto reply; + goto reply_no_propose; } if ((!HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_QUINCY)) && !sure) { ss << "not all up OSDs have CEPH_FEATURE_SERVER_QUINCY feature"; err = -EPERM; - goto reply; + goto reply_no_propose; } } else if (rel == ceph_release_t::reef) { if (!mon.monmap->get_required_features().contains_all( ceph::features::mon::FEATURE_REEF)) { ss << "not all mons are reef"; err = -EPERM; - goto reply; + goto reply_no_propose; } if ((!HAVE_FEATURE(osdmap.get_up_osd_features(), SERVER_REEF)) && !sure) { ss << "not all up OSDs have CEPH_FEATURE_SERVER_REEF feature"; err = -EPERM; - goto reply; + goto reply_no_propose; } } else { ss << "not supported for this release"; err = -EPERM; - goto reply; + goto reply_no_propose; } if (rel < osdmap.require_osd_release) { ss << "require_osd_release cannot be lowered once it has been set"; err = -EPERM; - goto reply; + goto reply_no_propose; } pending_inc.new_require_osd_release = rel; goto update; @@ -11936,7 +11939,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unrecognized flag '" << f << "', must be one of " << "{noup,nodown,noin,noout}"; err = -EINVAL; - goto reply; + goto reply_no_propose; } } } else { @@ -11955,12 +11958,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (flags == 0) { ss << "must specify flag(s) {noup,nodwon,noin,noout} to set/unset"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (who.empty()) { ss << "must specify at least one or more targets to set/unset"; err = -EINVAL; - goto reply; + goto reply_no_propose; } set osds; set crush_nodes; @@ -11985,7 +11988,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (osds.empty() && crush_nodes.empty() && device_classes.empty()) { // ss has reason for failure err = -EINVAL; - goto reply; + goto reply_no_propose; } bool any = false; for (auto osd : osds) { @@ -12069,11 +12072,10 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, pg_t pgid; err = parse_pgid(cmdmap, ss, pgid); if (err < 0) - goto reply; + goto reply_no_propose; if (pending_inc.new_pg_temp.count(pgid)) { dout(10) << __func__ << " waiting for pending update on " << pgid << dendl; - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } vector id_vec; @@ -12088,7 +12090,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!osdmap.exists(osd)) { ss << "osd." 
<< osd << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } new_pg_temp.push_back(osd); } @@ -12098,7 +12100,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "num of osds (" << new_pg_temp.size() <<") < pool min size (" << pool_min_size << ")"; err = -EINVAL; - goto reply; + goto reply_no_propose; } int pool_size = osdmap.get_pg_pool_size(pgid); @@ -12106,7 +12108,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "num of osds (" << new_pg_temp.size() <<") > pool size (" << pool_size << ")"; err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.new_pg_temp[pgid] = mempool::osdmap::vector( @@ -12118,7 +12120,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, pg_t pgid; err = parse_pgid(cmdmap, ss, pgid); if (err < 0) - goto reply; + goto reply_no_propose; int64_t osd; if (prefix == "osd primary-temp") { @@ -12126,12 +12128,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse 'id' value '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (!osdmap.exists(osd)) { ss << "osd." << osd << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } } else if (prefix == "osd rm-primary-temp") { @@ -12147,7 +12149,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << osdmap.require_min_compat_client << " < firefly, which is required for primary-temp"; err = -EPERM; - goto reply; + goto reply_no_propose; } pending_inc.new_primary_temp[pgid] = osd; @@ -12157,14 +12159,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, pg_t pgid; err = parse_pgid(cmdmap, ss, pgid); if (err < 0) - goto reply; + goto reply_no_propose; vector acting; int primary; osdmap.pg_to_acting_osds(pgid, &acting, &primary); if (primary < 0) { err = -EAGAIN; ss << "pg currently has no primary"; - goto reply; + goto reply_no_propose; } if (acting.size() > 1) { // map to just primary; it will map back to what it wants @@ -12185,7 +12187,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!done) { err = -EAGAIN; ss << "not enough up OSDs in the cluster to force repeer"; - goto reply; + goto reply_no_propose; } } goto update; @@ -12250,7 +12252,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "Try 'ceph osd set-require-min-compat-client " << min_release_name << "' " << "before using the new interface"; err = -EPERM; - goto reply; + goto reply_no_propose; } //TODO: Should I add feature and test for upmap-primary? @@ -12258,11 +12260,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err < 0) - goto reply; + goto reply_no_propose; pg_t pgid; err = parse_pgid(cmdmap, ss, pgid); if (err < 0) - goto reply; + goto reply_no_propose; if (pending_inc.old_pools.count(pgid.pool())) { ss << "pool of " << pgid << " is pending removal"; err = -ENOENT; @@ -12280,8 +12282,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, pending_inc.old_pg_upmap.count(pgid)) { dout(10) << __func__ << " waiting for pending update on " << pgid << dendl; - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } break; @@ -12292,7 +12293,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (! 
pt->is_replicated()) { ss << "pg-upmap-primary is only supported for replicated pools"; err = -EINVAL; - goto reply; + goto reply_no_propose; } } // fall through @@ -12302,8 +12303,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, pending_inc.old_pg_upmap_items.count(pgid)) { dout(10) << __func__ << " waiting for pending update on " << pgid << dendl; - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } break; @@ -12319,7 +12319,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse 'id' value(s) '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } int pool_min_size = osdmap.get_pg_pool_min_size(pgid); @@ -12327,7 +12327,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "num of osds (" << id_vec.size() <<") < pool min size (" << pool_min_size << ")"; err = -EINVAL; - goto reply; + goto reply_no_propose; } int pool_size = osdmap.get_pg_pool_size(pgid); @@ -12335,7 +12335,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "num of osds (" << id_vec.size() <<") > pool size (" << pool_size << ")"; err = -EINVAL; - goto reply; + goto reply_no_propose; } vector new_pg_upmap; @@ -12343,7 +12343,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (osd != CRUSH_ITEM_NONE && !osdmap.exists(osd)) { ss << "osd." << osd << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } auto it = std::find(new_pg_upmap.begin(), new_pg_upmap.end(), osd); if (it != new_pg_upmap.end()) { @@ -12356,7 +12356,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (new_pg_upmap.empty()) { ss << "no valid upmap items(pairs) is specified"; err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector( @@ -12379,13 +12379,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse 'id' value(s) '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (id_vec.size() % 2) { ss << "you must specify pairs of osd ids to be remapped"; err = -EINVAL; - goto reply; + goto reply_no_propose; } int pool_size = osdmap.get_pg_pool_size(pgid); @@ -12393,7 +12393,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "num of osd pairs (" << id_vec.size() / 2 <<") > pool size (" << pool_size << ")"; err = -EINVAL; - goto reply; + goto reply_no_propose; } vector> new_pg_upmap_items; @@ -12409,12 +12409,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!osdmap.exists(from)) { ss << "osd." << from << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } if (to != CRUSH_ITEM_NONE && !osdmap.exists(to)) { ss << "osd." << to << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } pair entry = make_pair(from, to); auto it = std::find(new_pg_upmap_items.begin(), @@ -12433,7 +12433,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (new_pg_upmap_items.empty()) { ss << "no valid upmap items(pairs) is specified"; err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.new_pg_upmap_items[pgid] = @@ -12457,12 +12457,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "invalid osd id value '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (id != CRUSH_ITEM_NONE && !osdmap.exists(id)) { ss << "osd." 
<< id << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } vector acting; int primary; @@ -12470,7 +12470,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (id == primary) { ss << "osd." << id << " is already primary for pg " << pgid; err = -EINVAL; - goto reply; + goto reply_no_propose; } int found_idx = 0; for (int i = 1 ; i < (int)acting.size(); i++) { // skip 0 on purpose @@ -12482,7 +12482,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (found_idx == 0) { ss << "osd." << id << " is not in acting set for pg " << pgid; err = -EINVAL; - goto reply; + goto reply_no_propose; } vector new_acting(acting); new_acting[found_idx] = new_acting[0]; @@ -12496,7 +12496,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "can't change primary for pg " << pgid << " to osd." << id << " - illegal pg after the change"; err = -EINVAL; - goto reply; + goto reply_no_propose; } pending_inc.new_pg_upmap_primary[pgid] = id; //TO-REMOVE: @@ -12522,20 +12522,20 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "invalid osd id value '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } double w; if (!cmd_getval(cmdmap, "weight", w)) { ss << "unable to parse 'weight' value '" << cmd_vartype_stringify(cmdmap.at("weight")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } long ww = (int)((double)CEPH_OSD_MAX_PRIMARY_AFFINITY*w); if (ww < 0L) { ss << "weight must be >= 0"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (osdmap.require_min_compat_client != ceph_release_t::unknown && osdmap.require_min_compat_client < ceph_release_t::firefly) { @@ -12543,7 +12543,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << osdmap.require_min_compat_client << " < firefly, which is required for primary-affinity"; err = -EPERM; - goto reply; + goto reply_no_propose; } if (osdmap.exists(id)) { pending_inc.new_primary_affinity[id] = ww; @@ -12555,7 +12555,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "osd." << id << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } } else if (prefix == "osd reweight") { int64_t id; @@ -12563,20 +12563,20 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse osd id value '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } double w; if (!cmd_getval(cmdmap, "weight", w)) { ss << "unable to parse weight value '" << cmd_vartype_stringify(cmdmap.at("weight")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } long ww = (int)((double)CEPH_OSD_IN*w); if (ww < 0L) { ss << "weight must be >= 0"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (osdmap.exists(id)) { pending_inc.new_weight[id] = ww; @@ -12588,7 +12588,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "osd." 
<< id << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } } else if (prefix == "osd reweightn") { map weights; @@ -12596,7 +12596,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err) { ss << "unable to parse 'weights' value '" << cmd_vartype_stringify(cmdmap.at("weights")) << "'"; - goto reply; + goto reply_no_propose; } pending_inc.new_weight.insert(weights.begin(), weights.end()); wait_for_finished_proposal( @@ -12609,7 +12609,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse osd id value '" << cmd_vartype_stringify(cmdmap.at("id")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } bool sure = false; cmd_getval(cmdmap, "yes_i_really_mean_it", sure); @@ -12617,15 +12617,15 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "are you SURE? this might mean real, permanent data loss. pass " "--yes-i-really-mean-it if you really do."; err = -EPERM; - goto reply; + goto reply_no_propose; } else if (!osdmap.exists(id)) { ss << "osd." << id << " does not exist"; err = -ENOENT; - goto reply; + goto reply_no_propose; } else if (!osdmap.is_down(id)) { ss << "osd." << id << " is not down"; err = -EBUSY; - goto reply; + goto reply_no_propose; } else { epoch_t e = osdmap.get_info(id).down_at; pending_inc.new_lost[id] = e; @@ -12669,7 +12669,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << cmd_vartype_stringify(cmdmap.at("id")) << ""; } err = -EINVAL; - goto reply; + goto reply_no_propose; } bool is_destroy = (prefix == "osd destroy-actual"); @@ -12686,26 +12686,26 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "as deletion of cephx and lockbox keys. " << "Pass --yes-i-really-mean-it if you really do."; err = -EPERM; - goto reply; + goto reply_no_propose; } else if (!osdmap.exists(id)) { ss << "osd." << id << " does not exist"; err = 0; // idempotent - goto reply; + goto reply_no_propose; } else if (osdmap.is_up(id)) { ss << "osd." << id << " is not `down`."; err = -EBUSY; - goto reply; + goto reply_no_propose; } else if (is_destroy && osdmap.is_destroyed(id)) { ss << "destroyed osd." << id; err = 0; - goto reply; + goto reply_no_propose; } if (prefix == "osd purge-new" && (osdmap.get_state(id) & CEPH_OSD_NEW) == 0) { ss << "osd." 
<< id << " is not new"; err = -EPERM; - goto reply; + goto reply_no_propose; } bool goto_reply = false; @@ -12726,7 +12726,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, paxos.unplug(); if (err < 0 || goto_reply) { - goto reply; + goto reply_no_propose; } if (is_destroy) { @@ -12767,7 +12767,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = get_json_str_map(param_json, ss, ¶m_map); if (err < 0) - goto reply; + goto reply_no_propose; dout(20) << __func__ << " osd new params " << param_map << dendl; @@ -12776,7 +12776,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, paxos.unplug(); if (err < 0) { - goto reply; + goto reply_no_propose; } if (f) { @@ -12788,7 +12788,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == EEXIST) { // idempotent operation err = 0; - goto reply; + goto reply_no_propose; } wait_for_finished_proposal(op, @@ -12805,7 +12805,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (cmd_id < 0) { ss << "invalid osd id value '" << cmd_id << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } dout(10) << " osd create got id " << cmd_id << dendl; } @@ -12816,7 +12816,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!uuid.parse(uuidstr.c_str())) { ss << "invalid uuid value '" << uuidstr << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } // we only care about the id if we also have the uuid, to // ensure the operation's idempotency. @@ -12827,11 +12827,10 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = prepare_command_osd_create(id, uuid, &new_id, ss); if (err < 0) { if (err == -EAGAIN) { - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } // a check has failed; reply to the user. - goto reply; + goto reply_no_propose; } else if (err == EEXIST) { // this is an idempotent operation; we can go ahead and reply. 
@@ -12845,7 +12844,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, rdata.append(ss); } err = 0; - goto reply; + goto reply_no_propose; } string empty_device_class; @@ -12893,14 +12892,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "Did you mean to specify \"osd blocklist range\"?"; err = -EINVAL; - goto reply; + goto reply_no_propose; } } entity_addr_t addr; if (!addr.parse(addrstr)) { ss << "unable to parse address " << addrstr; err = -EINVAL; - goto reply; + goto reply_no_propose; } else { if (range) { @@ -12908,18 +12907,18 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "You specified a range command, but " << addr << " does not parse as a CIDR range"; err = -EINVAL; - goto reply; + goto reply_no_propose; } addr.type = entity_addr_t::TYPE_CIDR; err = check_cluster_features(CEPH_FEATUREMASK_RANGE_BLOCKLIST, ss); if (err) { - goto reply; + goto reply_no_propose; } if ((addr.is_ipv4() && addr.get_nonce() > 32) || (addr.is_ipv6() && addr.get_nonce() > 128)) { ss << "Too many bits in range for that protocol!"; err = -EINVAL; - goto reply; + goto reply_no_propose; } } else { if (osdmap.require_osd_release >= ceph_release_t::nautilus) { @@ -12995,7 +12994,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } ss << addr << " isn't blocklisted"; err = 0; - goto reply; + goto reply_no_propose; } } } else if (prefix == "osd pool mksnap") { @@ -13005,7 +13004,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string snapname; cmd_getval(cmdmap, "snap", snapname); @@ -13013,15 +13012,15 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (p->is_unmanaged_snaps_mode()) { ss << "pool " << poolstr << " is in unmanaged snaps mode"; err = -EINVAL; - goto reply; + goto reply_no_propose; } else if (p->snap_exists(snapname.c_str())) { ss << "pool " << poolstr << " snap " << snapname << " already exists"; err = 0; - goto reply; + goto reply_no_propose; } else if (p->is_tier()) { ss << "pool " << poolstr << " is a cache tier"; err = -EINVAL; - goto reply; + goto reply_no_propose; } pg_pool_t *pp = 0; if (pending_inc.new_pools.count(pool)) @@ -13037,7 +13036,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, dout(20) << "pool-level snapshots have been disabled for pools " "attached to an fs - poolid:" << pool << dendl; err = -EOPNOTSUPP; - goto reply; + goto reply_no_propose; } pp->add_snap(snapname.c_str(), ceph_clock_now()); pp->set_snap_epoch(pending_inc.epoch); @@ -13054,7 +13053,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string snapname; cmd_getval(cmdmap, "snap", snapname); @@ -13062,11 +13061,11 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (p->is_unmanaged_snaps_mode()) { ss << "pool " << poolstr << " is in unmanaged snaps mode"; err = -EINVAL; - goto reply; + goto reply_no_propose; } else if (!p->snap_exists(snapname.c_str())) { ss << "pool " << poolstr << " snap " << snapname << " does not exist"; err = 0; - goto reply; + goto reply_no_propose; } pg_pool_t *pp = 0; if (pending_inc.new_pools.count(pool)) @@ -13105,7 +13104,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (poolstr[0] == '.' && !confirm) { ss << "pool names beginning with . 
are not allowed"; err = 0; - goto reply; + goto reply_no_propose; } int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr); if (pool_id >= 0) { @@ -13117,7 +13116,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "pool '" << poolstr << "' already exists"; err = 0; } - goto reply; + goto reply_no_propose; } int pool_type; @@ -13128,7 +13127,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "unknown pool type '" << pool_type_str << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } bool implicit_rule_creation = false; @@ -13154,7 +13153,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, profile_map, &ss); if (err) - goto reply; + goto reply_no_propose; dout(20) << "erasure code profile " << erasure_code_profile << " set" << dendl; pending_inc.set_erasure_code_profile(erasure_code_profile, profile_map); goto wait; @@ -13182,7 +13181,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (interr.length()) { ss << "error parsing integer value '" << rule_name << "': " << interr; err = -EINVAL; - goto reply; + goto reply_no_propose; } } rule_name = erasure_code_profile; @@ -13196,17 +13195,16 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, int rule; err = get_crush_rule(rule_name, &rule, &ss); if (err == -EAGAIN) { - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } if (err) - goto reply; + goto reply_no_propose; } if (expected_num_objects < 0) { ss << "'expected_num_objects' must be non-negative"; err = -EINVAL; - goto reply; + goto reply_no_propose; } set osds; @@ -13225,7 +13223,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, cct->_conf->filestore_merge_threshold > 0) { ss << "'expected_num_objects' requires 'filestore_merge_threshold < 0'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (has_filestore_osd && @@ -13240,7 +13238,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "expected_num_objects parameter when creating the pool." << " Pass --yes-i-really-mean-it to ignore it"; err = -EPERM; - goto reply; + goto reply_no_propose; } } @@ -13282,15 +13280,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, switch(err) { case -EEXIST: ss << "pool '" << poolstr << "' already exists"; - break; + err = 0; + goto reply_no_propose; case -EAGAIN: - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; case -ERANGE: - goto reply; + goto reply_no_propose; default: - goto reply; - break; + goto reply_no_propose; } } else { ss << "pool '" << poolstr << "' created"; @@ -13310,7 +13307,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool < 0) { ss << "pool '" << poolstr << "' does not exist"; err = 0; - goto reply; + goto reply_no_propose; } bool force_no_fake = false; @@ -13323,15 +13320,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << ". If you are *ABSOLUTELY CERTAIN* that is what you want, pass the pool name *twice*, " << "followed by --yes-i-really-really-mean-it."; err = -EPERM; - goto reply; + goto reply_no_propose; } err = _prepare_remove_pool(pool, &ss, force_no_fake); if (err == -EAGAIN) { - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } if (err < 0) - goto reply; + goto reply_no_propose; goto update; } else if (prefix == "osd pool rename") { string srcpoolstr, destpoolstr; @@ -13345,7 +13341,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (destpoolstr[0] == '.' 
&& !confirm) { ss << "pool names beginning with . are not allowed"; err = 0; - goto reply; + goto reply_no_propose; } if (pool_src < 0) { if (pool_dst >= 0) { @@ -13362,12 +13358,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unrecognized pool '" << srcpoolstr << "'"; err = -ENOENT; } - goto reply; + goto reply_no_propose; } else if (pool_dst >= 0) { // source pool exists and so does the destination pool ss << "pool '" << destpoolstr << "' already exists"; err = -EEXIST; - goto reply; + goto reply_no_propose; } int ret = _prepare_rename_pool(pool_src, destpoolstr); @@ -13387,7 +13383,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err < 0) - goto reply; + goto reply_no_propose; getline(ss, rs); wait_for_finished_proposal(op, new Monitor::C_Command(mon, op, 0, rs, @@ -13398,14 +13394,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err) - goto reply; + goto reply_no_propose; string poolstr; cmd_getval(cmdmap, "pool", poolstr); int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr); if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string tierpoolstr; cmd_getval(cmdmap, "tierpool", tierpoolstr); @@ -13413,7 +13409,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (tierpool_id < 0) { ss << "unrecognized pool '" << tierpoolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } const pg_pool_t *p = osdmap.get_pg_pool(pool_id); ceph_assert(p); @@ -13421,7 +13417,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ceph_assert(tp); if (!_check_become_tier(tierpool_id, tp, pool_id, p, &err, &ss)) { - goto reply; + goto reply_no_propose; } // make sure new tier is empty @@ -13432,27 +13428,26 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, !force_nonempty) { ss << "tier pool '" << tierpoolstr << "' is not empty; --force-nonempty to force"; err = -ENOTEMPTY; - goto reply; + goto reply_no_propose; } if (tp->is_erasure()) { ss << "tier pool '" << tierpoolstr << "' is an ec pool, which cannot be a tier"; err = -ENOTSUP; - goto reply; + goto reply_no_propose; } if ((!tp->removed_snaps.empty() || !tp->snaps.empty()) && (!force_nonempty || !g_conf()->mon_debug_unsafe_allow_tier_with_nonempty_snaps)) { ss << "tier pool '" << tierpoolstr << "' has snapshot state; it cannot be added as a tier without breaking the pool"; err = -ENOTEMPTY; - goto reply; + goto reply_no_propose; } // go pg_pool_t *np = pending_inc.get_new_pool(pool_id, p); pg_pool_t *ntp = pending_inc.get_new_pool(tierpool_id, tp); if (np->tiers.count(tierpool_id) || ntp->is_tier()) { - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } np->tiers.insert(tierpool_id); np->set_snap_epoch(pending_inc.epoch); // tier will update to our snap info @@ -13469,7 +13464,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string tierpoolstr; cmd_getval(cmdmap, "tierpool", tierpoolstr); @@ -13477,7 +13472,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (tierpool_id < 0) { ss << "unrecognized pool '" << tierpoolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } const pg_pool_t *p = osdmap.get_pg_pool(pool_id); ceph_assert(p); @@ -13485,13 +13480,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, 
ceph_assert(tp); if (!_check_remove_tier(pool_id, p, tp, &err, &ss)) { - goto reply; + goto reply_no_propose; } if (p->tiers.count(tierpool_id) == 0) { ss << "pool '" << tierpoolstr << "' is now (or already was) not a tier of '" << poolstr << "'"; err = 0; - goto reply; + goto reply_no_propose; } if (tp->tier_of != pool_id) { ss << "tier pool '" << tierpoolstr << "' is a tier of '" @@ -13499,12 +13494,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, // be scary about it; this is an inconsistency and bells must go off << "THIS SHOULD NOT HAVE HAPPENED AT ALL"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (p->read_tier == tierpool_id) { ss << "tier pool '" << tierpoolstr << "' is the overlay for '" << poolstr << "'; please remove-overlay first"; err = -EBUSY; - goto reply; + goto reply_no_propose; } // go pg_pool_t *np = pending_inc.get_new_pool(pool_id, p); @@ -13512,8 +13507,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (np->tiers.count(tierpool_id) == 0 || ntp->tier_of != pool_id || np->read_tier == tierpool_id) { - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } np->tiers.erase(tierpool_id); ntp->clear_tier(); @@ -13526,14 +13520,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err) - goto reply; + goto reply_no_propose; string poolstr; cmd_getval(cmdmap, "pool", poolstr); int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr); if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string overlaypoolstr; cmd_getval(cmdmap, "overlaypool", overlaypoolstr); @@ -13541,7 +13535,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (overlaypool_id < 0) { ss << "unrecognized pool '" << overlaypoolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } const pg_pool_t *p = osdmap.get_pg_pool(pool_id); ceph_assert(p); @@ -13550,19 +13544,19 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (p->tiers.count(overlaypool_id) == 0) { ss << "tier pool '" << overlaypoolstr << "' is not a tier of '" << poolstr << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } if (p->read_tier == overlaypool_id) { err = 0; ss << "overlay for '" << poolstr << "' is now (or already was) '" << overlaypoolstr << "'"; - goto reply; + goto reply_no_propose; } if (p->has_read_tier()) { ss << "pool '" << poolstr << "' has overlay '" << osdmap.get_pool_name(p->read_tier) << "'; please remove-overlay first"; err = -EINVAL; - goto reply; + goto reply_no_propose; } // go @@ -13586,18 +13580,18 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } const pg_pool_t *p = osdmap.get_pg_pool(pool_id); ceph_assert(p); if (!p->has_read_tier()) { err = 0; ss << "there is now (or already was) no overlay for '" << poolstr << "'"; - goto reply; + goto reply_no_propose; } if (!_check_remove_tier(pool_id, p, NULL, &err, &ss)) { - goto reply; + goto reply_no_propose; } // go @@ -13624,21 +13618,21 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err) - goto reply; + goto reply_no_propose; string poolstr; cmd_getval(cmdmap, "pool", poolstr); int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr); if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } const 
pg_pool_t *p = osdmap.get_pg_pool(pool_id); ceph_assert(p); if (!p->is_tier()) { ss << "pool '" << poolstr << "' is not a tier"; err = -EINVAL; - goto reply; + goto reply_no_propose; } string modestr; cmd_getval(cmdmap, "mode", modestr); @@ -13646,7 +13640,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (int(mode) < 0) { ss << "'" << modestr << "' is not a valid cache mode"; err = -EINVAL; - goto reply; + goto reply_no_propose; } bool sure = false; @@ -13656,7 +13650,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, mode == pg_pool_t::CACHEMODE_READFORWARD) { ss << "'" << modestr << "' is no longer a supported cache mode"; err = -EPERM; - goto reply; + goto reply_no_propose; } if ((mode != pg_pool_t::CACHEMODE_WRITEBACK && mode != pg_pool_t::CACHEMODE_NONE && @@ -13666,7 +13660,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "'" << modestr << "' is not a well-supported cache mode and may " << "corrupt your data. pass --yes-i-really-mean-it to force."; err = -EPERM; - goto reply; + goto reply_no_propose; } // pool already has this cache-mode set and there are no pending changes @@ -13676,7 +13670,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "set cache-mode for pool '" << poolstr << "'" << " to " << pg_pool_t::get_cache_mode_name(mode); err = 0; - goto reply; + goto reply_no_propose; } /* Mode description: @@ -13715,7 +13709,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << pg_pool_t::get_cache_mode_name(pg_pool_t::CACHEMODE_READPROXY) << "' allowed."; err = -EINVAL; - goto reply; + goto reply_no_propose; } if ((p->cache_mode == pg_pool_t::CACHEMODE_READFORWARD && (mode != pg_pool_t::CACHEMODE_WRITEBACK && @@ -13743,7 +13737,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << pg_pool_t::get_cache_mode_name(mode) << "' on pool '" << poolstr << "': dirty objects found"; err = -EBUSY; - goto reply; + goto reply_no_propose; } } // go @@ -13769,14 +13763,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) goto wait; if (err) - goto reply; + goto reply_no_propose; string poolstr; cmd_getval(cmdmap, "pool", poolstr); int64_t pool_id = osdmap.lookup_pg_pool_name(poolstr); if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string tierpoolstr; cmd_getval(cmdmap, "tierpool", tierpoolstr); @@ -13784,7 +13778,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (tierpool_id < 0) { ss << "unrecognized pool '" << tierpoolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } const pg_pool_t *p = osdmap.get_pg_pool(pool_id); ceph_assert(p); @@ -13792,7 +13786,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ceph_assert(tp); if (!_check_become_tier(tierpool_id, tp, pool_id, p, &err, &ss)) { - goto reply; + goto reply_no_propose; } int64_t size = 0; @@ -13800,7 +13794,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "unable to parse 'size' value '" << cmd_vartype_stringify(cmdmap.at("size")) << "'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } // make sure new tier is empty const pool_stat_t *pstats = @@ -13808,14 +13802,14 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pstats && pstats->stats.sum.num_objects != 0) { ss << "tier pool '" << tierpoolstr << "' is not empty"; err = -ENOTEMPTY; - goto reply; + goto reply_no_propose; } auto& modestr = g_conf().get_val("osd_tier_default_cache_mode"); pg_pool_t::cache_mode_t 
mode = pg_pool_t::get_cache_mode_from_str(modestr); if (int(mode) < 0) { ss << "osd tier cache default mode '" << modestr << "' is not a valid cache mode"; err = -EINVAL; - goto reply; + goto reply_no_propose; } HitSet::Params hsp; auto& cache_hit_set_type = @@ -13832,14 +13826,13 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, ss << "osd tier cache default hit set type '" << cache_hit_set_type << "' is not a known type"; err = -EINVAL; - goto reply; + goto reply_no_propose; } // go pg_pool_t *np = pending_inc.get_new_pool(pool_id, p); pg_pool_t *ntp = pending_inc.get_new_pool(tierpool_id, tp); if (np->tiers.count(tierpool_id) || ntp->is_tier()) { - wait_for_finished_proposal(op, new C_RetryMessage(this, op)); - return true; + goto wait; } np->tiers.insert(tierpool_id); np->read_tier = np->write_tier = tierpool_id; @@ -13867,7 +13860,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (pool_id < 0) { ss << "unrecognized pool '" << poolstr << "'"; err = -ENOENT; - goto reply; + goto reply_no_propose; } string field; @@ -13875,7 +13868,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (field != "max_objects" && field != "max_bytes") { ss << "unrecognized field '" << field << "'; should be 'max_bytes' or 'max_objects'"; err = -EINVAL; - goto reply; + goto reply_no_propose; } // val could contain unit designations, so we treat as a string @@ -13893,7 +13886,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (!tss.empty()) { ss << "error parsing value '" << val << "': " << tss; err = -EINVAL; - goto reply; + goto reply_no_propose; } pg_pool_t *pi = pending_inc.get_new_pool(pool_id, osdmap.get_pg_pool(pool_id)); @@ -13917,7 +13910,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, if (err == -EAGAIN) { goto wait; } else if (err < 0) { - goto reply; + goto reply_no_propose; } else { goto update; } @@ -13926,7 +13919,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, string pgidstr; err = parse_pgid(cmdmap, ss, pgid, pgidstr); if (err < 0) - goto reply; + goto reply_no_propose; bool sure = false; cmd_getval(cmdmap, "yes_i_really_mean_it", sure); if (!sure) { @@ -13936,7 +13929,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "willing to accept that the data is permanently destroyed. Pass " << "--yes-i-really-mean-it to proceed."; err = -EPERM; - goto reply; + goto reply_no_propose; } bool creating_now; { @@ -13961,7 +13954,7 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, } else { ss << "pg " << pgid << " already creating"; err = 0; - goto reply; + goto reply_no_propose; } } else if (prefix == "osd force_healthy_stretch_mode") { bool sure = false; @@ -13971,12 +13964,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, "(probably two data centers or availability zones?) and may result in PGs " "going inactive until backfilling is complete. Pass --yes-i-really-mean-it to proceed."; err = -EPERM; - goto reply; + goto reply_no_propose; } try_end_recovery_stretch_mode(true); ss << "Triggering healthy stretch mode"; err = 0; - goto reply; + goto reply_no_propose; } else if (prefix == "osd force_recovery_stretch_mode") { bool sure = false; cmd_getval(cmdmap, "yes_i_really_mean_it", sure); @@ -13986,12 +13979,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, "availability zones?) 
and should have happened automatically" "Pass --yes-i-really-mean-it to proceed."; err = -EPERM; - goto reply; + goto reply_no_propose; } mon.go_recovery_stretch_mode(); ss << "Triggering recovery stretch mode"; err = 0; - goto reply; + goto reply_no_propose; } else if (prefix == "osd set-allow-crimson") { bool sure = false; @@ -14007,12 +14000,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, << "If you are sure, add --yes-i-really-mean-it and add 'crimson' to " << "the experimental features config. This setting is irrevocable."; err = -EPERM; - goto reply; + goto reply_no_propose; } err = 0; if (osdmap.get_allow_crimson()) { - goto reply; + goto reply_no_propose; } else { pending_inc.set_allow_crimson(); goto update; @@ -14021,12 +14014,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, err = -EINVAL; } - reply: + reply_no_propose: getline(ss, rs); if (err < 0 && rs.length() == 0) rs = cpp_strerror(err); mon.reply_command(op, err, rs, rdata, get_last_committed()); - return ret; + return false; /* nothing to propose */ update: getline(ss, rs); @@ -14035,6 +14028,12 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op, return true; wait: + // XXX + // Some osd commands split changes across two epochs. + // It seems this is mostly for crush rule changes. It doesn't need + // to be this way but it's a bit of work to fix that. For now, + // trigger a proposal by returning true and then retry the command + // to complete the operation. wait_for_finished_proposal(op, new C_RetryMessage(this, op)); return true; } diff --git a/ceph/src/mon/PGMap.cc b/ceph/src/mon/PGMap.cc index 190b93bb8..349c86583 100644 --- a/ceph/src/mon/PGMap.cc +++ b/ceph/src/mon/PGMap.cc @@ -1212,10 +1212,12 @@ void PGMap::apply_incremental(CephContext *cct, const Incremental& inc) stat_osd_sub(t->first, t->second); osd_stat.erase(t); } - for (auto i = pool_statfs.begin(); i != pool_statfs.end(); ++i) { + for (auto i = pool_statfs.begin(); i != pool_statfs.end();) { if (i->first.second == *p) { pg_pool_sum[i->first.first].sub(i->second); - pool_statfs.erase(i); + i = pool_statfs.erase(i); + } else { + ++i; } } } @@ -3337,19 +3339,9 @@ void PGMap::get_health_checks( for (auto &it : pools) { const pg_pool_t &pool = it.second; const string& pool_name = osdmap.get_pool_name(it.first); - auto it2 = pg_pool_sum.find(it.first); - if (it2 == pg_pool_sum.end()) { - continue; - } - const pool_stat_t *pstat = &it2->second; - if (pstat == nullptr) { - continue; - } - const object_stat_sum_t& sum = pstat->stats.sum; // application metadata is not encoded until luminous is minimum // required release - if (sum.num_objects > 0 && pool.application_metadata.empty() && - !pool.is_tier()) { + if (pool.application_metadata.empty() && !pool.is_tier()) { stringstream ss; ss << "application not enabled on pool '" << pool_name << "'"; detail.push_back(ss.str()); diff --git a/ceph/src/msg/Dispatcher.h b/ceph/src/msg/Dispatcher.h index 5e025437b..885f1843b 100644 --- a/ceph/src/msg/Dispatcher.h +++ b/ceph/src/msg/Dispatcher.h @@ -204,13 +204,16 @@ public: /** * handle successful authentication (msgr2) * - * Authenticated result/state will be attached to the Connection. + * Authenticated result/state will be attached to the Connection. This is + * called via the MonClient. + * + * Do not acquire locks in this method! It is considered "fast" delivery. 
* * return 1 for success * return 0 for no action (let another Dispatcher handle it) * return <0 for failure (failure to parse caps, for instance) */ - virtual int ms_handle_authentication(Connection *con) { + virtual int ms_handle_fast_authentication(Connection *con) { return 0; } diff --git a/ceph/src/msg/Message.cc b/ceph/src/msg/Message.cc index fdf32b5e0..348546abd 100644 --- a/ceph/src/msg/Message.cc +++ b/ceph/src/msg/Message.cc @@ -822,9 +822,6 @@ Message *decode_message(CephContext *cct, break; - case MSG_MDS_DENTRYUNLINK_ACK: - m = make_message(); - break; case MSG_MDS_DENTRYUNLINK: m = make_message(); break; diff --git a/ceph/src/msg/Message.h b/ceph/src/msg/Message.h index 9eec1c5bb..f27c5448e 100644 --- a/ceph/src/msg/Message.h +++ b/ceph/src/msg/Message.h @@ -178,7 +178,6 @@ #define MSG_MDS_OPENINOREPLY 0x210 #define MSG_MDS_SNAPUPDATE 0x211 #define MSG_MDS_FRAGMENTNOTIFYACK 0x212 -#define MSG_MDS_DENTRYUNLINK_ACK 0x213 #define MSG_MDS_LOCK 0x300 // 0x3xx are for locker of mds #define MSG_MDS_INODEFILECAPS 0x301 diff --git a/ceph/src/msg/async/AsyncConnection.cc b/ceph/src/msg/async/AsyncConnection.cc index 8051f5907..683be086e 100644 --- a/ceph/src/msg/async/AsyncConnection.cc +++ b/ceph/src/msg/async/AsyncConnection.cc @@ -116,6 +116,7 @@ AsyncConnection::AsyncConnection(CephContext *cct, AsyncMessenger *m, DispatchQu : Connection(cct, m), delay_state(NULL), async_msgr(m), conn_id(q->get_id()), logger(w->get_perf_counter()), + labeled_logger(w->get_labeled_perf_counter()), state(STATE_NONE), port(-1), dispatch_queue(q), recv_buf(NULL), recv_max_prefetch(std::max(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)), @@ -791,6 +792,7 @@ void AsyncConnection::tick(uint64_t id) << target_addr << ", fault." << dendl; protocol->fault(); + labeled_logger->inc(l_msgr_connection_ready_timeouts); } else { last_tick_id = center->create_time_event(connect_timeout_us, tick_handler); } @@ -803,6 +805,7 @@ void AsyncConnection::tick(uint64_t id) << " us, fault." 
<< dendl; protocol->fault(); + labeled_logger->inc(l_msgr_connection_idle_timeouts); } else { last_tick_id = center->create_time_event(inactive_timeout_us, tick_handler); } diff --git a/ceph/src/msg/async/AsyncConnection.h b/ceph/src/msg/async/AsyncConnection.h index 82c29985b..78a590f8c 100644 --- a/ceph/src/msg/async/AsyncConnection.h +++ b/ceph/src/msg/async/AsyncConnection.h @@ -173,6 +173,7 @@ public: AsyncMessenger *async_msgr; uint64_t conn_id; PerfCounters *logger; + PerfCounters *labeled_logger; int state; ConnectedSocket cs; int port; diff --git a/ceph/src/msg/async/ProtocolV1.cc b/ceph/src/msg/async/ProtocolV1.cc index 9376d46b0..b45ad8ca5 100644 --- a/ceph/src/msg/async/ProtocolV1.cc +++ b/ceph/src/msg/async/ProtocolV1.cc @@ -2414,6 +2414,7 @@ CtPtr ProtocolV1::replace(const AsyncConnectionRef& existing, existing->worker->references--; new_worker->references++; existing->logger = new_worker->get_perf_counter(); + existing->labeled_logger = new_worker->get_labeled_perf_counter(); existing->worker = new_worker; existing->center = new_center; if (existing->delay_state) diff --git a/ceph/src/msg/async/ProtocolV2.cc b/ceph/src/msg/async/ProtocolV2.cc index 7cda9637d..08426b796 100644 --- a/ceph/src/msg/async/ProtocolV2.cc +++ b/ceph/src/msg/async/ProtocolV2.cc @@ -2808,6 +2808,7 @@ CtPtr ProtocolV2::reuse_connection(const AsyncConnectionRef& existing, existing->worker->references--; new_worker->references++; existing->logger = new_worker->get_perf_counter(); + existing->labeled_logger = new_worker->get_labeled_perf_counter(); existing->worker = new_worker; existing->center = new_center; if (existing->delay_state) diff --git a/ceph/src/msg/async/Stack.h b/ceph/src/msg/async/Stack.h index 376a87c72..6739968f4 100644 --- a/ceph/src/msg/async/Stack.h +++ b/ceph/src/msg/async/Stack.h @@ -17,10 +17,12 @@ #ifndef CEPH_MSG_ASYNC_STACK_H #define CEPH_MSG_ASYNC_STACK_H -#include "include/spinlock.h" #include "common/perf_counters.h" -#include "msg/msg_types.h" +#include "common/perf_counters_key.h" +#include "include/spinlock.h" #include "msg/async/Event.h" +#include "msg/msg_types.h" +#include class Worker; class ConnectedSocketImpl { @@ -214,6 +216,15 @@ enum { l_msgr_last, }; +enum { + l_msgr_labeled_first = l_msgr_last + 1, + + l_msgr_connection_ready_timeouts, + l_msgr_connection_idle_timeouts, + + l_msgr_labeled_last, +}; + class Worker { std::mutex init_lock; std::condition_variable init_cond; @@ -224,6 +235,7 @@ class Worker { CephContext *cct; PerfCounters *perf_logger; + PerfCounters *perf_labeled_logger; unsigned id; std::atomic_uint references; @@ -233,9 +245,11 @@ class Worker { Worker& operator=(const Worker&) = delete; Worker(CephContext *c, unsigned worker_id) - : cct(c), perf_logger(NULL), id(worker_id), references(0), center(c) { + : cct(c), id(worker_id), references(0), center(c) { char name[128]; - sprintf(name, "AsyncMessenger::Worker-%u", id); + char name_prefix[] = "AsyncMessenger::Worker"; + sprintf(name, "%s-%u", name_prefix, id); + // initialize perf_logger PerfCountersBuilder plb(cct, name, l_msgr_first, l_msgr_last); @@ -259,12 +273,35 @@ class Worker { perf_logger = plb.create_perf_counters(); cct->get_perfcounters_collection()->add(perf_logger); + + // Add labeled perfcounters + std::string labels = ceph::perf_counters::key_create( + name_prefix, {{"id", std::to_string(id)}}); + PerfCountersBuilder plb_labeled( + cct, labels, l_msgr_labeled_first, + l_msgr_labeled_last); + + plb_labeled.add_u64_counter( + l_msgr_connection_ready_timeouts, 
"msgr_connection_ready_timeouts", + "Number of not yet ready connections declared as dead", NULL, + PerfCountersBuilder::PRIO_USEFUL); + plb_labeled.add_u64_counter( + l_msgr_connection_idle_timeouts, "msgr_connection_idle_timeouts", + "Number of connections closed due to idleness", NULL, + PerfCountersBuilder::PRIO_USEFUL); + + perf_labeled_logger = plb_labeled.create_perf_counters(); + cct->get_perfcounters_collection()->add(perf_labeled_logger); } virtual ~Worker() { if (perf_logger) { cct->get_perfcounters_collection()->remove(perf_logger); delete perf_logger; } + if (perf_labeled_logger) { + cct->get_perfcounters_collection()->remove(perf_labeled_logger); + delete perf_labeled_logger; + } } virtual int listen(entity_addr_t &addr, unsigned addr_slot, @@ -275,6 +312,7 @@ class Worker { virtual void initialize() {} PerfCounters *get_perf_counter() { return perf_logger; } + PerfCounters *get_labeled_perf_counter() { return perf_labeled_logger; } void release_worker() { int oldref = references.fetch_sub(1); ceph_assert(oldref > 0); diff --git a/ceph/src/os/bluestore/AvlAllocator.cc b/ceph/src/os/bluestore/AvlAllocator.cc index 26eba36a0..afa541862 100644 --- a/ceph/src/os/bluestore/AvlAllocator.cc +++ b/ceph/src/os/bluestore/AvlAllocator.cc @@ -39,7 +39,7 @@ uint64_t AvlAllocator::_pick_block_after(uint64_t *cursor, uint64_t search_bytes = 0; auto rs_start = range_tree.lower_bound(range_t{*cursor, size}, compare); for (auto rs = rs_start; rs != range_tree.end(); ++rs) { - uint64_t offset = p2roundup(rs->start, align); + uint64_t offset = rs->start; *cursor = offset + size; if (offset + size <= rs->end) { return offset; @@ -59,7 +59,7 @@ uint64_t AvlAllocator::_pick_block_after(uint64_t *cursor, } // If we reached end, start from beginning till cursor. 
for (auto rs = range_tree.begin(); rs != rs_start; ++rs) { - uint64_t offset = p2roundup(rs->start, align); + uint64_t offset = rs->start; *cursor = offset + size; if (offset + size <= rs->end) { return offset; @@ -82,7 +82,7 @@ uint64_t AvlAllocator::_pick_block_fits(uint64_t size, const auto compare = range_size_tree.key_comp(); auto rs_start = range_size_tree.lower_bound(range_t{0, size}, compare); for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) { - uint64_t offset = p2roundup(rs->start, align); + uint64_t offset = rs->start; if (offset + size <= rs->end) { return offset; } diff --git a/ceph/src/os/bluestore/BlueFS.cc b/ceph/src/os/bluestore/BlueFS.cc index 8454ddaf8..710021f07 100644 --- a/ceph/src/os/bluestore/BlueFS.cc +++ b/ceph/src/os/bluestore/BlueFS.cc @@ -658,16 +658,24 @@ void BlueFS::_init_alloc() } logger->set(l_bluefs_wal_alloc_unit, wal_alloc_size); + + uint64_t shared_alloc_size = cct->_conf->bluefs_shared_alloc_size; + if (shared_alloc && shared_alloc->a) { + uint64_t unit = shared_alloc->a->get_block_size(); + shared_alloc_size = std::max( + unit, + shared_alloc_size); + ceph_assert(0 == p2phase(shared_alloc_size, unit)); + } if (bdev[BDEV_SLOW]) { alloc_size[BDEV_DB] = cct->_conf->bluefs_alloc_size; - alloc_size[BDEV_SLOW] = cct->_conf->bluefs_shared_alloc_size; - logger->set(l_bluefs_db_alloc_unit, cct->_conf->bluefs_alloc_size); - logger->set(l_bluefs_main_alloc_unit, cct->_conf->bluefs_shared_alloc_size); + alloc_size[BDEV_SLOW] = shared_alloc_size; } else { - alloc_size[BDEV_DB] = cct->_conf->bluefs_shared_alloc_size; - logger->set(l_bluefs_main_alloc_unit, 0); - logger->set(l_bluefs_db_alloc_unit, cct->_conf->bluefs_shared_alloc_size); + alloc_size[BDEV_DB] = shared_alloc_size; + alloc_size[BDEV_SLOW] = 0; } + logger->set(l_bluefs_db_alloc_unit, alloc_size[BDEV_DB]); + logger->set(l_bluefs_main_alloc_unit, alloc_size[BDEV_SLOW]); // new wal and db devices are never shared if (bdev[BDEV_NEWWAL]) { alloc_size[BDEV_NEWWAL] = cct->_conf->bluefs_alloc_size; @@ -681,13 +689,13 @@ void BlueFS::_init_alloc() continue; } ceph_assert(bdev[id]->get_size()); - ceph_assert(alloc_size[id]); if (is_shared_alloc(id)) { dout(1) << __func__ << " shared, id " << id << std::hex << ", capacity 0x" << bdev[id]->get_size() << ", block size 0x" << alloc_size[id] << std::dec << dendl; } else { + ceph_assert(alloc_size[id]); std::string name = "bluefs-"; const char* devnames[] = { "wal","db","slow" }; if (id <= BDEV_SLOW) diff --git a/ceph/src/os/bluestore/BlueStore.cc b/ceph/src/os/bluestore/BlueStore.cc index f011ab727..aa14d0204 100644 --- a/ceph/src/os/bluestore/BlueStore.cc +++ b/ceph/src/os/bluestore/BlueStore.cc @@ -11803,7 +11803,7 @@ int BlueStore::_collection_list( [&, start_time = mono_clock::now(), func_name = __func__] { log_latency_fn( func_name, - l_bluestore_remove_lat, + l_bluestore_clist_lat, mono_clock::now() - start_time, cct->_conf->bluestore_log_collection_list_age, [&](const ceph::timespan& lat) { diff --git a/ceph/src/os/bluestore/BtreeAllocator.cc b/ceph/src/os/bluestore/BtreeAllocator.cc index cf08d7ae7..2455ec111 100644 --- a/ceph/src/os/bluestore/BtreeAllocator.cc +++ b/ceph/src/os/bluestore/BtreeAllocator.cc @@ -25,7 +25,7 @@ uint64_t BtreeAllocator::_pick_block_after(uint64_t *cursor, { auto rs_start = range_tree.lower_bound(*cursor); for (auto rs = rs_start; rs != range_tree.end(); ++rs) { - uint64_t offset = p2roundup(rs->first, align); + uint64_t offset = rs->first; if (offset + size <= rs->second) { *cursor = offset + size; return offset; @@ 
-37,7 +37,7 @@ uint64_t BtreeAllocator::_pick_block_after(uint64_t *cursor, } // If we reached end, start from beginning till cursor. for (auto rs = range_tree.begin(); rs != rs_start; ++rs) { - uint64_t offset = p2roundup(rs->first, align); + uint64_t offset = rs->first; if (offset + size <= rs->second) { *cursor = offset + size; return offset; @@ -53,7 +53,7 @@ uint64_t BtreeAllocator::_pick_block_fits(uint64_t size, // the needs auto rs_start = range_size_tree.lower_bound(range_value_t{0,size}); for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) { - uint64_t offset = p2roundup(rs->start, align); + uint64_t offset = rs->start; if (offset + size <= rs->start + rs->size) { return offset; } diff --git a/ceph/src/os/bluestore/StupidAllocator.cc b/ceph/src/os/bluestore/StupidAllocator.cc index 550024e67..8f74a499e 100644 --- a/ceph/src/os/bluestore/StupidAllocator.cc +++ b/ceph/src/os/bluestore/StupidAllocator.cc @@ -52,20 +52,6 @@ void StupidAllocator::_insert_free(uint64_t off, uint64_t len) } } -/// return the effective length of the extent if we align to alloc_unit -uint64_t StupidAllocator::_aligned_len( - StupidAllocator::interval_set_t::iterator p, - uint64_t alloc_unit) -{ - uint64_t skew = p.get_start() % alloc_unit; - if (skew) - skew = alloc_unit - skew; - if (skew > p.get_len()) - return 0; - else - return p.get_len() - skew; -} - int64_t StupidAllocator::allocate_int( uint64_t want_size, uint64_t alloc_unit, int64_t hint, uint64_t *offset, uint32_t *length) @@ -89,7 +75,7 @@ int64_t StupidAllocator::allocate_int( for (bin = orig_bin; bin < (int)free.size(); ++bin) { p = free[bin].lower_bound(hint); while (p != free[bin].end()) { - if (_aligned_len(p, alloc_unit) >= want_size) { + if (p.get_len() >= want_size) { goto found; } ++p; @@ -102,7 +88,7 @@ int64_t StupidAllocator::allocate_int( p = free[bin].begin(); auto end = hint ? free[bin].lower_bound(hint) : free[bin].end(); while (p != end) { - if (_aligned_len(p, alloc_unit) >= want_size) { + if (p.get_len() >= want_size) { goto found; } ++p; @@ -114,7 +100,7 @@ int64_t StupidAllocator::allocate_int( for (bin = orig_bin; bin >= 0; --bin) { p = free[bin].lower_bound(hint); while (p != free[bin].end()) { - if (_aligned_len(p, alloc_unit) >= alloc_unit) { + if (p.get_len() >= alloc_unit) { goto found; } ++p; @@ -127,7 +113,7 @@ int64_t StupidAllocator::allocate_int( p = free[bin].begin(); auto end = hint ? 
free[bin].lower_bound(hint) : free[bin].end(); while (p != end) { - if (_aligned_len(p, alloc_unit) >= alloc_unit) { + if (p.get_len() >= alloc_unit) { goto found; } ++p; @@ -137,11 +123,9 @@ int64_t StupidAllocator::allocate_int( return -ENOSPC; found: - uint64_t skew = p.get_start() % alloc_unit; - if (skew) - skew = alloc_unit - skew; - *offset = p.get_start() + skew; - *length = std::min(std::max(alloc_unit, want_size), p2align((p.get_len() - skew), alloc_unit)); + *offset = p.get_start(); + *length = std::min(std::max(alloc_unit, want_size), p2align(p.get_len(), alloc_unit)); + if (cct->_conf->bluestore_debug_small_allocations) { uint64_t max = alloc_unit * (rand() % cct->_conf->bluestore_debug_small_allocations); @@ -158,7 +142,7 @@ int64_t StupidAllocator::allocate_int( free[bin].erase(*offset, *length); uint64_t off, len; - if (*offset && free[bin].contains(*offset - skew - 1, &off, &len)) { + if (*offset && free[bin].contains(*offset - 1, &off, &len)) { int newbin = _choose_bin(len); if (newbin != bin) { ldout(cct, 30) << __func__ << " demoting 0x" << std::hex << off << "~" << len diff --git a/ceph/src/os/bluestore/StupidAllocator.h b/ceph/src/os/bluestore/StupidAllocator.h index 0d50d73f4..443b09135 100644 --- a/ceph/src/os/bluestore/StupidAllocator.h +++ b/ceph/src/os/bluestore/StupidAllocator.h @@ -31,10 +31,6 @@ class StupidAllocator : public Allocator { unsigned _choose_bin(uint64_t len); void _insert_free(uint64_t offset, uint64_t len); - uint64_t _aligned_len( - interval_set_t::iterator p, - uint64_t alloc_unit); - public: StupidAllocator(CephContext* cct, int64_t size, diff --git a/ceph/src/os/bluestore/bluestore_tool.cc b/ceph/src/os/bluestore/bluestore_tool.cc index 41775c8a3..e63616bdd 100644 --- a/ceph/src/os/bluestore/bluestore_tool.cc +++ b/ceph/src/os/bluestore/bluestore_tool.cc @@ -878,17 +878,17 @@ int main(int argc, char **argv) return {"", false}; } std::error_code ec; - fs::path target_path = fs::weakly_canonical(fs::path{dev_target}, ec); + fs::path target = fs::weakly_canonical(fs::path{dev_target}, ec); if (ec) { cerr << "failed to retrieve absolute path for " << dev_target << ": " << ec.message() << std::endl; exit(EXIT_FAILURE); } - return {target_path.native(), - (fs::exists(target_path) && - fs::is_regular_file(target_path) && - fs::file_size(target_path) > 0)}; + return {target.native(), + fs::exists(target) && + (fs::is_block_file(target) || + (fs::is_regular_file(target) && fs::file_size(target) > 0))}; }(); // Attach either DB or WAL volume, create if needed // check if we need additional size specification diff --git a/ceph/src/os/bluestore/fastbmap_allocator_impl.cc b/ceph/src/os/bluestore/fastbmap_allocator_impl.cc index 595b12485..4f735ba2e 100644 --- a/ceph/src/os/bluestore/fastbmap_allocator_impl.cc +++ b/ceph/src/os/bluestore/fastbmap_allocator_impl.cc @@ -17,19 +17,9 @@ uint64_t AllocatorLevel::l2_allocs = 0; inline interval_t _align2units(uint64_t offset, uint64_t len, uint64_t min_length) { - interval_t res; - if (len >= min_length) { - res.offset = p2roundup(offset, min_length); - auto delta_off = res.offset - offset; - if (len > delta_off) { - res.length = len - delta_off; - res.length = p2align(res.length, min_length); - if (res.length) { - return res; - } - } - } - return interval_t(); + return len >= min_length ? 
+ interval_t(offset, p2align(len, min_length)) : + interval_t(); } interval_t AllocatorLevel01Loose::_get_longest_from_l0(uint64_t pos0, diff --git a/ceph/src/osd/OSD.cc b/ceph/src/osd/OSD.cc index 222164dd8..c61e7d332 100644 --- a/ceph/src/osd/OSD.cc +++ b/ceph/src/osd/OSD.cc @@ -1418,14 +1418,14 @@ MOSDMap *OSDService::build_incremental_map_msg(epoch_t since, epoch_t to, for (epoch_t e = since + 1; e <= to; ++e) { bufferlist bl; if (get_inc_map_bl(e, bl)) { - m->incremental_maps[e] = std::move(bl); + m->incremental_maps[e] = bl; } else { dout(10) << __func__ << " missing incremental map " << e << dendl; if (!get_map_bl(e, bl)) { derr << __func__ << " also missing full map " << e << dendl; goto panic; } - m->maps[e] = std::move(bl); + m->maps[e] = bl; } max--; max_bytes -= bl.length(); @@ -7444,7 +7444,7 @@ void OSD::ms_fast_dispatch(Message *m) OID_EVENT_TRACE_WITH_MSG(m, "MS_FAST_DISPATCH_END", false); } -int OSD::ms_handle_authentication(Connection *con) +int OSD::ms_handle_fast_authentication(Connection *con) { int ret = 0; auto s = ceph::ref_cast(con->get_priv()); diff --git a/ceph/src/osd/OSD.h b/ceph/src/osd/OSD.h index 1b714d3bd..00fab7ec8 100644 --- a/ceph/src/osd/OSD.h +++ b/ceph/src/osd/OSD.h @@ -1525,7 +1525,7 @@ public: bool ms_handle_refused(Connection *con) override { return osd->ms_handle_refused(con); } - int ms_handle_authentication(Connection *con) override { + int ms_handle_fast_authentication(Connection *con) override { return true; } } heartbeat_dispatcher; @@ -1948,7 +1948,7 @@ private: void ms_handle_connect(Connection *con) override; void ms_handle_fast_connect(Connection *con) override; void ms_handle_fast_accept(Connection *con) override; - int ms_handle_authentication(Connection *con) override; + int ms_handle_fast_authentication(Connection *con) override; bool ms_handle_reset(Connection *con) override; void ms_handle_remote_reset(Connection *con) override {} bool ms_handle_refused(Connection *con) override; diff --git a/ceph/src/osd/OSDMap.cc b/ceph/src/osd/OSDMap.cc index 710e39b21..7a97f33e3 100644 --- a/ceph/src/osd/OSDMap.cc +++ b/ceph/src/osd/OSDMap.cc @@ -2145,8 +2145,8 @@ bool OSDMap::check_pg_upmaps( << j->first << " " << j->second << dendl; to_cancel->push_back(pg); - } else { - //Josh--check partial no-op here. + } else if (newmap != j->second) { + // check partial no-op here. ldout(cct, 10) << __func__ << " simplifying partially no-op pg_upmap_items " << j->first << " " << j->second << " -> " << newmap @@ -4096,6 +4096,8 @@ string OSDMap::get_flag_string(unsigned f) s += ",purged_snapdirs"; if (f & CEPH_OSDMAP_PGLOG_HARDLIMIT) s += ",pglog_hardlimit"; + if (f & CEPH_OSDMAP_NOAUTOSCALE) + s += ",noautoscale"; if (s.length()) s.erase(0, 1); return s; @@ -4981,17 +4983,16 @@ int OSDMap::balance_primaries( map> acting_prims_by_osd; pgs_by_osd = tmp_osd_map.get_pgs_by_osd(cct, pid, &prim_pgs_by_osd, &acting_prims_by_osd); - // Transfer pgs into a map, `pgs_to_check`. This will tell us the total num_changes after all - // calculations have been finalized. - // Transfer osds into a set, `osds_to_check`. - // This is to avoid poor runtime when we loop through the pgs and to set up - // our call to calc_desired_primary_distribution. + // Construct information about the pgs and osds we will consider in new primary mappings, + // as well as a map of all pgs and their original primary osds. 
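Editorial illustration for the balance_primaries() hunk continuing below: the updated code also remembers each PG's original primary (orig_prims) so that re-selecting that same OSD erases any pending pg_upmap_primary entry instead of recording a no-op mapping. A minimal Python sketch of that bookkeeping, with container names that are stand-ins rather than the OSDMap.cc structures:

# Illustration only: the no-op detection added to balance_primaries().
# orig_prims maps pg -> original primary; pending and tmp_map map pg -> overridden primary.
def apply_primary_choice(pg, chosen_osd, orig_prims, pending, tmp_map):
    tmp_map[pg] = chosen_osd
    if chosen_osd == orig_prims.get(pg):
        pending.pop(pg, None)   # choosing the original primary again is a no-op
        return False            # this pg did not really change mappings
    pending[pg] = chosen_osd
    return True                 # this pg changed mappings

pending, tmp = {}, {}
assert apply_primary_choice('1.a', 3, {'1.a': 3}, pending, tmp) is False
assert '1.a' not in pending and tmp['1.a'] == 3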
map prim_pgs_to_check; vector osds_to_check; + map orig_prims; for (const auto & [osd, pgs] : prim_pgs_by_osd) { osds_to_check.push_back(osd); for (const auto & pg : pgs) { prim_pgs_to_check.insert({pg, false}); + orig_prims.insert({pg, osd}); } } @@ -5065,9 +5066,14 @@ int OSDMap::balance_primaries( prim_dist_scores[up_primary] -= 1; // Update the mappings - pending_inc->new_pg_upmap_primary[pg] = curr_best_osd; tmp_osd_map.pg_upmap_primaries[pg] = curr_best_osd; - prim_pgs_to_check[pg] = true; // mark that this pg changed mappings + if (curr_best_osd == orig_prims[pg]) { + pending_inc->new_pg_upmap_primary.erase(pg); + prim_pgs_to_check[pg] = false; + } else { + pending_inc->new_pg_upmap_primary[pg] = curr_best_osd; + prim_pgs_to_check[pg] = true; // mark that this pg changed mappings + } curr_num_changes++; } @@ -7201,6 +7207,24 @@ void OSDMap::check_health(CephContext *cct, ss.str(), 0); } } + // UNEQUAL_WEIGHT + if (stretch_mode_enabled) { + vector subtrees; + crush->get_subtree_of_type(stretch_mode_bucket, &subtrees); + if (subtrees.size() != 2) { + stringstream ss; + ss << "Stretch mode buckets != 2"; + checks->add("INCORRECT_NUM_BUCKETS_STRETCH_MODE", HEALTH_WARN, ss.str(), 0); + return; + } + int weight1 = crush->get_item_weight(subtrees[0]); + int weight2 = crush->get_item_weight(subtrees[1]); + stringstream ss; + if (weight1 != weight2) { + ss << "Stretch mode buckets have different weights!"; + checks->add("UNEVEN_WEIGHTS_STRETCH_MODE", HEALTH_WARN, ss.str(), 0); + } + } } int OSDMap::parse_osd_id_list(const vector& ls, set *out, diff --git a/ceph/src/osd/OpRequest.cc b/ceph/src/osd/OpRequest.cc index cd62c922d..59889fc53 100644 --- a/ceph/src/osd/OpRequest.cc +++ b/ceph/src/osd/OpRequest.cc @@ -88,7 +88,7 @@ void OpRequest::_dump(Formatter *f) const } } -void OpRequest::_dump_op_descriptor_unlocked(ostream& stream) const +void OpRequest::_dump_op_descriptor(ostream& stream) const { get_req()->print(stream); } @@ -121,6 +121,7 @@ void OpRequest::mark_flag_point(uint8_t flag, const char *s) { uint8_t old_flags = hit_flag_points; #endif mark_event(s); + last_event_detail = s; hit_flag_points |= flag; latest_flag_point = flag; tracepoint(oprequest, mark_flag_point, reqid.name._type, diff --git a/ceph/src/osd/OpRequest.h b/ceph/src/osd/OpRequest.h index e0bc232a5..1a608b583 100644 --- a/ceph/src/osd/OpRequest.h +++ b/ceph/src/osd/OpRequest.h @@ -63,6 +63,7 @@ private: entity_inst_t req_src_inst; uint8_t hit_flag_points; uint8_t latest_flag_point; + const char* last_event_detail = nullptr; utime_t dequeued_time; static const uint8_t flag_queued_for_pg=1 << 0; static const uint8_t flag_reached_pg = 1 << 1; @@ -74,7 +75,7 @@ private: OpRequest(Message *req, OpTracker *tracker); protected: - void _dump_op_descriptor_unlocked(std::ostream& stream) const override; + void _dump_op_descriptor(std::ostream& stream) const override; void _unregistered() override; bool filter_out(const std::set& filters) override; @@ -107,11 +108,11 @@ public: return latest_flag_point; } - std::string_view state_string() const override { + std::string _get_state_string() const override { switch(latest_flag_point) { case flag_queued_for_pg: return "queued for pg"; case flag_reached_pg: return "reached pg"; - case flag_delayed: return "delayed"; + case flag_delayed: return last_event_detail; case flag_started: return "started"; case flag_sub_op_sent: return "waiting for sub ops"; case flag_commit_sent: return "commit sent; apply or cleanup"; @@ -152,8 +153,8 @@ public: void mark_reached_pg() { 
mark_flag_point(flag_reached_pg, "reached_pg"); } - void mark_delayed(const std::string& s) { - mark_flag_point_string(flag_delayed, s); + void mark_delayed(const char* s) { + mark_flag_point(flag_delayed, s); } void mark_started() { mark_flag_point(flag_started, "started"); diff --git a/ceph/src/osd/object_state_fmt.h b/ceph/src/osd/object_state_fmt.h new file mode 100644 index 000000000..6b6350b94 --- /dev/null +++ b/ceph/src/osd/object_state_fmt.h @@ -0,0 +1,23 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#pragma once +/** + * \file fmtlib formatters for some types.h classes + */ + +#include "osd/object_state.h" +#include "osd/osd_types_fmt.h" +#if FMT_VERSION >= 90000 +#include +#endif + +template <> +struct fmt::formatter { + constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); } + + template + auto format(const ObjectState& os, FormatContext& ctx) const + { + return fmt::format_to(ctx.out(), "exists {} oi {}", os.exists, os.oi); + } +}; diff --git a/ceph/src/osd/scheduler/mClockScheduler.cc b/ceph/src/osd/scheduler/mClockScheduler.cc index 8f76e5165..0ea519655 100644 --- a/ceph/src/osd/scheduler/mClockScheduler.cc +++ b/ceph/src/osd/scheduler/mClockScheduler.cc @@ -340,8 +340,7 @@ uint32_t mClockScheduler::calc_scaled_cost(int item_cost) item_cost)); auto cost_per_io = static_cast(osd_bandwidth_cost_per_io); - // Calculate total scaled cost in bytes - return cost_per_io + cost; + return std::max(cost, cost_per_io); } void mClockScheduler::update_configuration() diff --git a/ceph/src/osd/scheduler/mClockScheduler.h b/ceph/src/osd/scheduler/mClockScheduler.h index 3d958bed9..f708b1d7a 100644 --- a/ceph/src/osd/scheduler/mClockScheduler.h +++ b/ceph/src/osd/scheduler/mClockScheduler.h @@ -38,12 +38,30 @@ constexpr double default_max = std::numeric_limits::is_iec559 ? std::numeric_limits::infinity() : std::numeric_limits::max(); -using client_id_t = uint64_t; -using profile_id_t = uint64_t; - +/** + * client_profile_id_t + * + * client_id - global id (client.####) for client QoS + * profile_id - id generated by client's QoS profile + * + * Currently (Reef and below), both members are set to + * 0 which ensures that all external clients share the + * mClock profile allocated reservation and limit + * bandwidth. + * + * Note: Post Reef, both members will be set to non-zero + * values when the distributed feature of the mClock + * algorithm is utilized. 
+ */ struct client_profile_id_t { - client_id_t client_id; - profile_id_t profile_id; + uint64_t client_id = 0; + uint64_t profile_id = 0; + + client_profile_id_t(uint64_t _client_id, uint64_t _profile_id) : + client_id(_client_id), + profile_id(_profile_id) {} + + client_profile_id_t() = default; auto operator<=>(const client_profile_id_t&) const = default; friend std::ostream& operator<<(std::ostream& out, @@ -177,10 +195,7 @@ class mClockScheduler : public OpScheduler, md_config_obs_t { static scheduler_id_t get_scheduler_id(const OpSchedulerItem &item) { return scheduler_id_t{ item.get_scheduler_class(), - client_profile_id_t{ - item.get_owner(), - 0 - } + client_profile_id_t() }; } diff --git a/ceph/src/perfglue/CMakeLists.txt b/ceph/src/perfglue/CMakeLists.txt index 66e2f2bf9..9ad73d576 100644 --- a/ceph/src/perfglue/CMakeLists.txt +++ b/ceph/src/perfglue/CMakeLists.txt @@ -1,4 +1,4 @@ -if(ALLOCATOR STREQUAL "tcmalloc" AND NOT WITH_SEASTAR) +if(ALLOCATOR STREQUAL "tcmalloc") add_library(heap_profiler STATIC heap_profiler.cc) target_link_libraries(heap_profiler diff --git a/ceph/src/pybind/cephfs/c_cephfs.pxd b/ceph/src/pybind/cephfs/c_cephfs.pxd index 4636b4bf4..69d24912b 100644 --- a/ceph/src/pybind/cephfs/c_cephfs.pxd +++ b/ceph/src/pybind/cephfs/c_cephfs.pxd @@ -36,6 +36,13 @@ cdef extern from "cephfs/libcephfs.h" nogil: size_t nr_snap_metadata snap_metadata *snap_metadata + cdef struct ceph_snapdiff_info: + pass + + cdef struct ceph_snapdiff_entry_t: + dirent dir_entry + uint64_t snapid + ctypedef void* rados_t const char *ceph_version(int *major, int *minor, int *patch) @@ -111,6 +118,14 @@ cdef extern from "cephfs/libcephfs.h" nogil: void ceph_seekdir(ceph_mount_info *cmount, ceph_dir_result *dirp, int64_t offset) int ceph_chdir(ceph_mount_info *cmount, const char *path) dirent * ceph_readdir(ceph_mount_info *cmount, ceph_dir_result *dirp) + int ceph_open_snapdiff(ceph_mount_info *cmount, + const char *root_path, + const char *rel_path, + const char *snap1, + const char *snap2, + ceph_snapdiff_info *out) + int ceph_readdir_snapdiff(ceph_snapdiff_info *snapdiff, ceph_snapdiff_entry_t *out); + int ceph_close_snapdiff(ceph_snapdiff_info *snapdiff) int ceph_rmdir(ceph_mount_info *cmount, const char *path) const char* ceph_getcwd(ceph_mount_info *cmount) int ceph_sync_fs(ceph_mount_info *cmount) diff --git a/ceph/src/pybind/cephfs/cephfs.pyx b/ceph/src/pybind/cephfs/cephfs.pyx index fca3846de..793d88b98 100644 --- a/ceph/src/pybind/cephfs/cephfs.pyx +++ b/ceph/src/pybind/cephfs/cephfs.pyx @@ -57,6 +57,8 @@ CEPH_SETATTR_SIZE = 0x20 CEPH_SETATTR_CTIME = 0x40 CEPH_SETATTR_BTIME = 0x200 +CEPH_NOSNAP = -2 + # errno definitions cdef enum: CEPHFS_EBLOCKLISTED = 108 @@ -219,7 +221,7 @@ cdef make_ex(ret, msg): class DirEntry(namedtuple('DirEntry', - ['d_ino', 'd_off', 'd_reclen', 'd_type', 'd_name'])): + ['d_ino', 'd_off', 'd_reclen', 'd_type', 'd_name', 'd_snapid'])): DT_DIR = 0x4 DT_REG = 0x8 DT_LNK = 0xA @@ -277,13 +279,15 @@ cdef class DirResult(object): d_off=0, d_reclen=dirent.d_reclen, d_type=dirent.d_type, - d_name=dirent.d_name) + d_name=dirent.d_name, + d_snapid=CEPH_NOSNAP) ELSE: return DirEntry(d_ino=dirent.d_ino, d_off=dirent.d_off, d_reclen=dirent.d_reclen, d_type=dirent.d_type, - d_name=dirent.d_name) + d_name=dirent.d_name, + d_snapid=CEPH_NOSNAP) def close(self): if self.handle: @@ -321,6 +325,56 @@ cdef class DirResult(object): with nogil: ceph_seekdir(self.lib.cluster, self.handle, _offset) +cdef class SnapDiffHandle(object): + cdef LibCephFS lib + cdef ceph_snapdiff_info 
handle + cdef int opened + + def __cinit__(self, _lib): + self.opened = 0 + self.lib = _lib + + def __dealloc__(self): + self.close() + + def readdir(self): + self.lib.require_state("mounted") + + cdef: + ceph_snapdiff_entry_t difent + with nogil: + ret = ceph_readdir_snapdiff(&self.handle, &difent) + if ret < 0: + raise make_ex(ret, "ceph_readdir_snapdiff failed, ret {}" + .format(ret)) + if ret == 0: + return None + + IF UNAME_SYSNAME == "FreeBSD" or UNAME_SYSNAME == "Darwin": + return DirEntry(d_ino=difent.dir_entry.d_ino, + d_off=0, + d_reclen=difent.dir_entry.d_reclen, + d_type=difent.dir_entry.d_type, + d_name=difent.dir_entry.d_name, + d_snapid=difent.snapid) + ELSE: + return DirEntry(d_ino=difent.dir_entry.d_ino, + d_off=difent.dir_entry.d_off, + d_reclen=difent.dir_entry.d_reclen, + d_type=difent.dir_entry.d_type, + d_name=difent.dir_entry.d_name, + d_snapid=difent.snapid) + + def close(self): + if (not self.opened): + return + self.lib.require_state("mounted") + with nogil: + ret = ceph_close_snapdiff(&self.handle) + if ret < 0: + raise make_ex(ret, "closesnapdiff failed") + self.opened = 0 + def cstr(val, name, encoding="utf-8", opt=False) -> bytes: """ @@ -974,6 +1028,34 @@ cdef class LibCephFS(object): return handle.close() + def opensnapdiff(self, root_path, rel_path, snap1name, snap2name) -> SnapDiffHandle: + """ + Open the given directory. + + :param path: the path name of the directory to open. Must be either an absolute path + or a path relative to the current working directory. + :returns: the open directory stream handle + """ + self.require_state("mounted") + + h = SnapDiffHandle(self) + root = cstr(root_path, 'root') + relp = cstr(rel_path, 'relp') + snap1 = cstr(snap1name, 'snap1') + snap2 = cstr(snap2name, 'snap2') + cdef: + char* _root = root + char* _relp = relp + char* _snap1 = snap1 + char* _snap2 = snap2 + with nogil: + ret = ceph_open_snapdiff(self.cluster, _root, _relp, _snap1, _snap2, &h.handle); + if ret < 0: + raise make_ex(ret, "open_snapdiff failed for {} vs. {}" + .format(snap1.decode('utf-8'), snap2.decode('utf-8'))) + h.opened = 1 + return h + def rewinddir(self, DirResult handle): """ Rewind the directory stream to the beginning of the directory. 
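The cephfs.pyx additions above expose snapshot-diff iteration to Python through SnapDiffHandle and LibCephFS.opensnapdiff(). For illustration only (not part of the patch), client code using the new binding might look like the following; it assumes an already-mounted filesystem and two existing snapshots, and the paths and snapshot names are placeholders:

# Illustration only: iterating the delta between two snapshots with the
# bindings added above. Assumes a reachable cluster; paths and snapshot
# names are placeholders.
import cephfs

fs = cephfs.LibCephFS(conffile='')
fs.mount()
diff = fs.opensnapdiff('/', 'some/dir', 'snap_old', 'snap_new')
try:
    while True:
        entry = diff.readdir()
        if entry is None:            # readdir() returns None at end of the diff stream
            break
        print(entry.d_name, entry.d_snapid)   # d_snapid tells which snapshot the entry belongs to
finally:
    diff.close()
    fs.unmount()
fs.shutdown()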
diff --git a/ceph/src/pybind/cephfs/mock_cephfs.pxi b/ceph/src/pybind/cephfs/mock_cephfs.pxi index 1dec0d50d..54b27d04c 100644 --- a/ceph/src/pybind/cephfs/mock_cephfs.pxi +++ b/ceph/src/pybind/cephfs/mock_cephfs.pxi @@ -39,6 +39,12 @@ cdef nogil: size_t nr_snap_metadata snap_metadata *snap_metadata + cdef struct ceph_snapdiff_info: + int dummy + + cdef struct ceph_snapdiff_entry_t: + int dummy + ctypedef void* rados_t const char *ceph_version(int *major, int *minor, int *patch): @@ -175,6 +181,12 @@ cdef nogil: pass dirent * ceph_readdir(ceph_mount_info *cmount, ceph_dir_result *dirp): pass + int ceph_open_snapdiff(ceph_mount_info *cmount, const char *root_path, const char *rel_path, const char *snap1path, const char *snap2root, ceph_snapdiff_info *out): + pass + int ceph_readdir_snapdiff(ceph_snapdiff_info *snapdiff, ceph_snapdiff_entry_t *out): + pass + int ceph_close_snapdiff(ceph_snapdiff_info *snapdiff): + pass int ceph_rmdir(ceph_mount_info *cmount, const char *path): pass const char* ceph_getcwd(ceph_mount_info *cmount): diff --git a/ceph/src/pybind/mgr/ceph_module.pyi b/ceph/src/pybind/mgr/ceph_module.pyi index b89402d01..50147f08f 100644 --- a/ceph/src/pybind/mgr/ceph_module.pyi +++ b/ceph/src/pybind/mgr/ceph_module.pyi @@ -112,7 +112,7 @@ class BaseMgrModule(object): def _ceph_remove_mds_perf_query(self, query_id: int) -> None: ... def _ceph_reregister_mds_perf_queries(self) -> None: ... def _ceph_get_mds_perf_counters(self, query_id: int) -> Optional[Dict[str, List[PerfCounterT]]]: ... - def _ceph_unregister_client(self, addrs: str) -> None: ... - def _ceph_register_client(self, addrs: str) -> None: ... + def _ceph_unregister_client(self, name: Optional[str], addrs: str) -> None: ... + def _ceph_register_client(self, name: Optional[str], addrs: str, replace: Optional[bool]) -> None: ... def _ceph_is_authorized(self, arguments: Dict[str, str]) -> bool: ... def _ceph_get_daemon_health_metrics(self) -> Dict[str, List[Dict[str, Any]]]: ... diff --git a/ceph/src/pybind/mgr/cephadm/agent.py b/ceph/src/pybind/mgr/cephadm/agent.py index 41a0b5b0b..93a08cb34 100644 --- a/ceph/src/pybind/mgr/cephadm/agent.py +++ b/ceph/src/pybind/mgr/cephadm/agent.py @@ -337,7 +337,7 @@ class CephadmAgentHelpers: def _agent_down(self, host: str) -> bool: # if host is draining or drained (has _no_schedule label) there should not # be an agent deployed there and therefore we should return False - if host not in [h.hostname for h in self.mgr.cache.get_non_draining_hosts()]: + if self.mgr.cache.is_host_draining(host): return False # if we haven't deployed an agent on the host yet, don't say an agent is down if not self.mgr.cache.get_daemons_by_type('agent', host=host): diff --git a/ceph/src/pybind/mgr/cephadm/exchange.py b/ceph/src/pybind/mgr/cephadm/exchange.py new file mode 100644 index 000000000..76a613407 --- /dev/null +++ b/ceph/src/pybind/mgr/cephadm/exchange.py @@ -0,0 +1,164 @@ +# Data exchange formats for communicating more +# complex data structures between the cephadm binary +# an the mgr module. 
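The new cephadm/exchange.py module whose header comment appears above (its classes follow below) lets the mgr hand "cephadm _orch deploy" a single JSON document on stdin instead of a long argument list; serve.py later in this patch switches to Deploy(...).dump_json_str(). A hedged sketch of building such a payload using only the fields this file defines, with made-up values:

# Illustration only: building the stdin payload for `cephadm _orch deploy`
# with the helpers defined below. Values are made up; the import path assumes
# the mgr module environment.
from cephadm.exchange import Deploy, DeployMeta

payload = Deploy(
    fsid='00000000-0000-0000-0000-000000000000',
    name='node-exporter.host1',
    image='quay.io/prometheus/node-exporter:v1.5.0',
    params={'tcp_ports': [9100]},          # matches the tcp_ports param serve.py now sets
    meta=DeployMeta(service_name='node-exporter', ports=[9100]),
    config_blobs={},
)
print(payload.dump_json_str())             # the JSON document sent on stdin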
+ +import json + +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + TypeVar, + Union, + cast, +) + + +FuncT = TypeVar("FuncT", bound=Callable) + + +class _DataField: + """A descriptor to map object fields into a data dictionary.""" + + def __init__( + self, + name: Optional[str] = None, + field_type: Optional[FuncT] = None, + ): + self.name = name + self.field_type = field_type + + def __set_name__(self, _: str, name: str) -> None: + if not self.name: + self.name = name + + def __get__(self, obj: Any, objtype: Any = None) -> Any: + return obj.data[self.name] + + def __set__(self, obj: Any, value: Any) -> None: + if self.field_type is not None: + obj.data[self.name] = self.field_type(value) + else: + obj.data[self.name] = value + + +def _get_data(obj: Any) -> Any: + """Wrapper to get underlying data dicts from objects that + advertise having them. + """ + _gd = getattr(obj, "get_data", None) + if _gd: + return _gd() + return obj + + +def _or_none(field_type: FuncT) -> FuncT: + def _field_type_or_none(value: Any) -> Any: + if value is None: + return None + return field_type(value) + + return cast(FuncT, _field_type_or_none) + + +class DeployMeta: + """Deployment metadata. Child of Deploy. Used by cephadm to + determine when certain changes have been made. + """ + + service_name = _DataField(field_type=str) + ports = _DataField(field_type=list) + ip = _DataField(field_type=_or_none(str)) + deployed_by = _DataField(field_type=_or_none(list)) + rank = _DataField(field_type=_or_none(int)) + rank_generation = _DataField(field_type=_or_none(int)) + extra_container_args = _DataField(field_type=_or_none(list)) + extra_entrypoint_args = _DataField(field_type=_or_none(list)) + + def __init__( + self, + init_data: Optional[Dict[str, Any]] = None, + *, + service_name: str = "", + ports: Optional[List[int]] = None, + ip: Optional[str] = None, + deployed_by: Optional[List[str]] = None, + rank: Optional[int] = None, + rank_generation: Optional[int] = None, + extra_container_args: Optional[List[Union[str, Dict[str, Any]]]] = None, + extra_entrypoint_args: Optional[List[Union[str, Dict[str, Any]]]] = None, + ): + self.data = dict(init_data or {}) + # set fields + self.service_name = service_name + self.ports = ports or [] + self.ip = ip + self.deployed_by = deployed_by + self.rank = rank + self.rank_generation = rank_generation + self.extra_container_args = extra_container_args + self.extra_entrypoint_args = extra_entrypoint_args + + def get_data(self) -> Dict[str, Any]: + return self.data + + to_simplified = get_data + + @classmethod + def convert( + cls, + value: Union[Dict[str, Any], "DeployMeta", None], + ) -> "DeployMeta": + if not isinstance(value, DeployMeta): + return cls(value) + return value + + +class Deploy: + """Set of fields that instructs cephadm to deploy a + service/daemon. 
+ """ + + fsid = _DataField(field_type=str) + name = _DataField(field_type=str) + image = _DataField(field_type=str) + deploy_arguments = _DataField(field_type=list) + params = _DataField(field_type=dict) + meta = _DataField(field_type=DeployMeta.convert) + config_blobs = _DataField(field_type=dict) + + def __init__( + self, + init_data: Optional[Dict[str, Any]] = None, + *, + fsid: str = "", + name: str = "", + image: str = "", + deploy_arguments: Optional[List[str]] = None, + params: Optional[Dict[str, Any]] = None, + meta: Optional[DeployMeta] = None, + config_blobs: Optional[Dict[str, Any]] = None, + ): + self.data = dict(init_data or {}) + # set fields + self.fsid = fsid + self.name = name + self.image = image + self.deploy_arguments = deploy_arguments or [] + self.params = params or {} + self.meta = DeployMeta.convert(meta) + self.config_blobs = config_blobs or {} + + def get_data(self) -> Dict[str, Any]: + """Return the underlying data dict.""" + return self.data + + def to_simplified(self) -> Dict[str, Any]: + """Return a simplified serializable version of the object.""" + return {k: _get_data(v) for k, v in self.get_data().items()} + + def dump_json_str(self) -> str: + """Return the object's JSON string representation.""" + return json.dumps(self.to_simplified()) diff --git a/ceph/src/pybind/mgr/cephadm/inventory.py b/ceph/src/pybind/mgr/cephadm/inventory.py index 98c9a4880..7153ca6dc 100644 --- a/ceph/src/pybind/mgr/cephadm/inventory.py +++ b/ceph/src/pybind/mgr/cephadm/inventory.py @@ -17,7 +17,7 @@ from ceph.utils import str_to_datetime, datetime_to_str, datetime_now from orchestrator import OrchestratorError, HostSpec, OrchestratorEvent, service_to_daemon_types from cephadm.services.cephadmservice import CephadmDaemonDeploySpec -from .utils import resolve_ip +from .utils import resolve_ip, SpecialHostLabels from .migrations import queue_migrate_nfs_spec, queue_migrate_rgw_spec if TYPE_CHECKING: @@ -1003,29 +1003,60 @@ class HostCache(): h for h in self.mgr.inventory.all_specs() if ( self.host_had_daemon_refresh(h.hostname) - and '_no_schedule' not in h.labels + and SpecialHostLabels.DRAIN_DAEMONS not in h.labels + ) + ] + + def get_conf_keyring_available_hosts(self) -> List[HostSpec]: + """ + Returns all hosts without the drain conf and keyrings + label (SpecialHostLabels.DRAIN_CONF_KEYRING) that have + had a refresh. That is equivalent to all hosts we + consider eligible for deployment of conf and keyring files + + Any host without that label is considered fair game for + a client keyring spec to match. However, we want to still + wait for refresh here so that we know what keyrings we've + already deployed here + """ + return [ + h for h in self.mgr.inventory.all_specs() + if ( + self.host_had_daemon_refresh(h.hostname) + and SpecialHostLabels.DRAIN_CONF_KEYRING not in h.labels ) ] def get_non_draining_hosts(self) -> List[HostSpec]: """ - Returns all hosts that do not have _no_schedule label. + Returns all hosts that do not have drain daemon label + (SpecialHostLabels.DRAIN_DAEMONS). 
Useful for the agent who needs this specific list rather than the schedulable_hosts since the agent needs to be deployed on hosts with no daemon refresh """ return [ - h for h in self.mgr.inventory.all_specs() if '_no_schedule' not in h.labels + h for h in self.mgr.inventory.all_specs() if SpecialHostLabels.DRAIN_DAEMONS not in h.labels ] def get_draining_hosts(self) -> List[HostSpec]: """ - Returns all hosts that have _no_schedule label and therefore should have - no daemons placed on them, but are potentially still reachable + Returns all hosts that have the drain daemons label (SpecialHostLabels.DRAIN_DAEMONS) + and therefore should have no daemons placed on them, but are potentially still reachable """ return [ - h for h in self.mgr.inventory.all_specs() if '_no_schedule' in h.labels + h for h in self.mgr.inventory.all_specs() if SpecialHostLabels.DRAIN_DAEMONS in h.labels + ] + + def get_conf_keyring_draining_hosts(self) -> List[HostSpec]: + """ + Returns all hosts that have drain conf and keyrings label (SpecialHostLabels.DRAIN_CONF_KEYRING) + and therefore should have no config files or client keyring placed on them, but are + potentially still reachable + """ + return [ + h for h in self.mgr.inventory.all_specs() if SpecialHostLabels.DRAIN_CONF_KEYRING in h.labels ] def get_unreachable_hosts(self) -> List[HostSpec]: @@ -1045,6 +1076,18 @@ class HostCache(): ) ] + def is_host_unreachable(self, hostname: str) -> bool: + # take hostname and return if it matches the hostname of an unreachable host + return hostname in [h.hostname for h in self.get_unreachable_hosts()] + + def is_host_schedulable(self, hostname: str) -> bool: + # take hostname and return if it matches the hostname of a schedulable host + return hostname in [h.hostname for h in self.get_schedulable_hosts()] + + def is_host_draining(self, hostname: str) -> bool: + # take hostname and return if it matches the hostname of a draining host + return hostname in [h.hostname for h in self.get_draining_hosts()] + def get_facts(self, host: str) -> Dict[str, Any]: return self.facts.get(host, {}) @@ -1294,8 +1337,7 @@ class HostCache(): return True def all_host_metadata_up_to_date(self) -> bool: - unreachables = [h.hostname for h in self.get_unreachable_hosts()] - if [h for h in self.get_hosts() if (not self.host_metadata_up_to_date(h) and h not in unreachables)]: + if [h for h in self.get_hosts() if (not self.host_metadata_up_to_date(h) and not self.is_host_unreachable(h))]: # this function is primarily for telling if it's safe to try and apply a service # spec. 
Since offline/maintenance hosts aren't considered in that process anyway # we don't want to return False if the host without up-to-date metadata is in one diff --git a/ceph/src/pybind/mgr/cephadm/migrations.py b/ceph/src/pybind/mgr/cephadm/migrations.py index 52a8605bc..27f777af6 100644 --- a/ceph/src/pybind/mgr/cephadm/migrations.py +++ b/ceph/src/pybind/mgr/cephadm/migrations.py @@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Iterator, Optional, Dict, Any, List from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec, RGWSpec from cephadm.schedule import HostAssignment +from cephadm.utils import SpecialHostLabels import rados from mgr_module import NFS_POOL_NAME @@ -308,7 +309,7 @@ class Migrations: if 'client.admin' not in self.mgr.keys.keys: self.mgr._client_keyring_set( entity='client.admin', - placement='label:_admin', + placement=f'label:{SpecialHostLabels.ADMIN}', ) return True diff --git a/ceph/src/pybind/mgr/cephadm/module.py b/ceph/src/pybind/mgr/cephadm/module.py index 37fe3b4ac..7b97ce74a 100644 --- a/ceph/src/pybind/mgr/cephadm/module.py +++ b/ceph/src/pybind/mgr/cephadm/module.py @@ -58,6 +58,7 @@ from .services.cephadmservice import MonService, MgrService, MdsService, RgwServ from .services.ingress import IngressService from .services.container import CustomContainerService from .services.iscsi import IscsiService +from .services.nvmeof import NvmeofService from .services.nfs import NFSService from .services.osd import OSDRemovalQueue, OSDService, OSD, NotFoundError from .services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \ @@ -69,7 +70,7 @@ from .inventory import Inventory, SpecStore, HostCache, AgentCache, EventStore, from .upgrade import CephadmUpgrade from .template import TemplateMgr from .utils import CEPH_IMAGE_TYPES, RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES, forall_hosts, \ - cephadmNoImage, CEPH_UPGRADE_ORDER + cephadmNoImage, CEPH_UPGRADE_ORDER, SpecialHostLabels from .configchecks import CephadmConfigChecks from .offline_watcher import OfflineHostWatcher from .tuned_profiles import TunedProfileUtils @@ -103,9 +104,10 @@ os._exit = os_exit_noop # type: ignore # Default container images ----------------------------------------------------- -DEFAULT_IMAGE = 'quay.io/ceph/ceph:v18' +DEFAULT_IMAGE = 'quay.io/ceph/ceph' # DO NOT ADD TAG TO THIS DEFAULT_PROMETHEUS_IMAGE = 'quay.io/prometheus/prometheus:v2.43.0' DEFAULT_NODE_EXPORTER_IMAGE = 'quay.io/prometheus/node-exporter:v1.5.0' +DEFAULT_NVMEOF_IMAGE = 'quay.io/ceph/nvmeof:0.0.2' DEFAULT_LOKI_IMAGE = 'docker.io/grafana/loki:2.4.0' DEFAULT_PROMTAIL_IMAGE = 'docker.io/grafana/promtail:2.4.0' DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.25.0' @@ -165,6 +167,13 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, default=False, desc='Use libstoragemgmt during device scans', ), + Option( + 'inventory_list_all', + type='bool', + default=False, + desc='Whether ceph-volume inventory should report ' + 'more devices (mostly mappers (LVs / mpaths), partitions...)', + ), Option( 'daemon_cache_timeout', type='secs', @@ -201,6 +210,11 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, default=DEFAULT_PROMETHEUS_IMAGE, desc='Prometheus container image', ), + Option( + 'container_image_nvmeof', + default=DEFAULT_NVMEOF_IMAGE, + desc='Nvme-of container image', + ), Option( 'container_image_grafana', default=DEFAULT_GRAFANA_IMAGE, @@ -445,30 +459,6 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, 
default=False, desc='Log all refresh metadata. Includes daemon, device, and host info collected regularly. Only has effect if logging at debug level' ), - Option( - 'prometheus_web_user', - type='str', - default='admin', - desc='Prometheus web user' - ), - Option( - 'prometheus_web_password', - type='str', - default='admin', - desc='Prometheus web password' - ), - Option( - 'alertmanager_web_user', - type='str', - default='admin', - desc='Alertmanager web user' - ), - Option( - 'alertmanager_web_password', - type='str', - default='admin', - desc='Alertmanager web password' - ), Option( 'secure_monitoring_stack', type='bool', @@ -511,6 +501,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self.mode = '' self.container_image_base = '' self.container_image_prometheus = '' + self.container_image_nvmeof = '' self.container_image_grafana = '' self.container_image_alertmanager = '' self.container_image_node_exporter = '' @@ -549,19 +540,17 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self._temp_files: List = [] self.ssh_key: Optional[str] = None self.ssh_pub: Optional[str] = None + self.ssh_cert: Optional[str] = None self.use_agent = False self.agent_refresh_rate = 0 self.agent_down_multiplier = 0.0 self.agent_starting_port = 0 self.service_discovery_port = 0 self.secure_monitoring_stack = False - self.prometheus_web_password: Optional[str] = None - self.prometheus_web_user: Optional[str] = None - self.alertmanager_web_password: Optional[str] = None - self.alertmanager_web_user: Optional[str] = None self.apply_spec_fails: List[Tuple[str, str]] = [] self.max_osd_draining_count = 10 self.device_enhanced_scan = False + self.inventory_list_all = False self.cgroups_split = True self.log_refresh_metadata = False self.default_cephadm_command_timeout = 0 @@ -630,7 +619,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, OSDService, NFSService, MonService, MgrService, MdsService, RgwService, RbdMirrorService, GrafanaService, AlertmanagerService, PrometheusService, NodeExporterService, LokiService, PromtailService, CrashService, IscsiService, - IngressService, CustomContainerService, CephfsMirrorService, + IngressService, CustomContainerService, CephfsMirrorService, NvmeofService, CephadmAgent, CephExporterService, SNMPGatewayService, ElasticSearchService, JaegerQueryService, JaegerAgentService, JaegerCollectorService ] @@ -642,6 +631,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self.mgr_service: MgrService = cast(MgrService, self.cephadm_services['mgr']) self.osd_service: OSDService = cast(OSDService, self.cephadm_services['osd']) self.iscsi_service: IscsiService = cast(IscsiService, self.cephadm_services['iscsi']) + self.nvmeof_service: NvmeofService = cast(NvmeofService, self.cephadm_services['nvmeof']) self.scheduled_async_actions: List[Callable] = [] @@ -869,6 +859,9 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, ssh_config_fname)) def _process_ls_output(self, host: str, ls: List[Dict[str, Any]]) -> None: + def _as_datetime(value: Optional[str]) -> Optional[datetime.datetime]: + return str_to_datetime(value) if value is not None else None + dm = {} for d in ls: if not d['style'].startswith('cephadm'): @@ -877,51 +870,56 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, continue if '.' 
not in d['name']: continue - sd = orchestrator.DaemonDescription() - sd.last_refresh = datetime_now() - for k in ['created', 'started', 'last_configured', 'last_deployed']: - v = d.get(k, None) - if v: - setattr(sd, k, str_to_datetime(d[k])) - sd.daemon_type = d['name'].split('.')[0] - if sd.daemon_type not in orchestrator.KNOWN_DAEMON_TYPES: - logger.warning(f"Found unknown daemon type {sd.daemon_type} on host {host}") + daemon_type = d['name'].split('.')[0] + if daemon_type not in orchestrator.KNOWN_DAEMON_TYPES: + logger.warning(f"Found unknown daemon type {daemon_type} on host {host}") continue - sd.daemon_id = '.'.join(d['name'].split('.')[1:]) - sd.hostname = host - sd.container_id = d.get('container_id') - if sd.container_id: + container_id = d.get('container_id') + if container_id: # shorten the hash - sd.container_id = sd.container_id[0:12] - sd.container_image_name = d.get('container_image_name') - sd.container_image_id = d.get('container_image_id') - sd.container_image_digests = d.get('container_image_digests') - sd.memory_usage = d.get('memory_usage') - sd.memory_request = d.get('memory_request') - sd.memory_limit = d.get('memory_limit') - sd.cpu_percentage = d.get('cpu_percentage') - sd._service_name = d.get('service_name') - sd.deployed_by = d.get('deployed_by') - sd.version = d.get('version') - sd.ports = d.get('ports') - sd.ip = d.get('ip') - sd.rank = int(d['rank']) if d.get('rank') is not None else None - sd.rank_generation = int(d['rank_generation']) if d.get( + container_id = container_id[0:12] + rank = int(d['rank']) if d.get('rank') is not None else None + rank_generation = int(d['rank_generation']) if d.get( 'rank_generation') is not None else None - sd.extra_container_args = d.get('extra_container_args') - sd.extra_entrypoint_args = d.get('extra_entrypoint_args') + status, status_desc = None, 'unknown' if 'state' in d: - sd.status_desc = d['state'] - sd.status = { + status_desc = d['state'] + status = { 'running': DaemonDescriptionStatus.running, 'stopped': DaemonDescriptionStatus.stopped, 'error': DaemonDescriptionStatus.error, 'unknown': DaemonDescriptionStatus.error, }[d['state']] - else: - sd.status_desc = 'unknown' - sd.status = None + sd = orchestrator.DaemonDescription( + daemon_type=daemon_type, + daemon_id='.'.join(d['name'].split('.')[1:]), + hostname=host, + container_id=container_id, + container_image_id=d.get('container_image_id'), + container_image_name=d.get('container_image_name'), + container_image_digests=d.get('container_image_digests'), + version=d.get('version'), + status=status, + status_desc=status_desc, + created=_as_datetime(d.get('created')), + started=_as_datetime(d.get('started')), + last_refresh=datetime_now(), + last_configured=_as_datetime(d.get('last_configured')), + last_deployed=_as_datetime(d.get('last_deployed')), + memory_usage=d.get('memory_usage'), + memory_request=d.get('memory_request'), + memory_limit=d.get('memory_limit'), + cpu_percentage=d.get('cpu_percentage'), + service_name=d.get('service_name'), + ports=d.get('ports'), + ip=d.get('ip'), + deployed_by=d.get('deployed_by'), + rank=rank, + rank_generation=rank_generation, + extra_container_args=d.get('extra_container_args'), + extra_entrypoint_args=d.get('extra_entrypoint_args'), + ) dm[sd.name()] = sd self.log.debug('Refreshed host %s daemons (%d)' % (host, len(dm))) self.cache.update_host_daemons(host, dm) @@ -1086,12 +1084,25 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self._validate_and_set_ssh_val('ssh_identity_pub', inbuf, old) return 0, "", 
"" + @orchestrator._cli_write_command( + 'cephadm set-signed-cert') + def _set_signed_cert(self, inbuf: Optional[str] = None) -> Tuple[int, str, str]: + """Set a signed cert if CA signed keys are being used (use -i )""" + if inbuf is None or len(inbuf) == 0: + return -errno.EINVAL, "", "empty cert file provided" + old = self.ssh_cert + if inbuf == old: + return 0, "value unchanged", "" + self._validate_and_set_ssh_val('ssh_identity_cert', inbuf, old) + return 0, "", "" + @orchestrator._cli_write_command( 'cephadm clear-key') def _clear_key(self) -> Tuple[int, str, str]: """Clear cluster SSH key""" self.set_store('ssh_identity_key', None) self.set_store('ssh_identity_pub', None) + self.set_store('ssh_identity_cert', None) self.ssh._reconfig_ssh() self.log.info('Cleared cluster SSH key') return 0, '', '' @@ -1105,6 +1116,15 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, else: return -errno.ENOENT, '', 'No cluster SSH key defined' + @orchestrator._cli_read_command( + 'cephadm get-signed-cert') + def _get_signed_cert(self) -> Tuple[int, str, str]: + """Show SSH signed cert for connecting to cluster hosts using CA signed keys""" + if self.ssh_cert: + return 0, self.ssh_cert, '' + else: + return -errno.ENOENT, '', 'No signed cert defined' + @orchestrator._cli_read_command( 'cephadm get-user') def _get_user(self) -> Tuple[int, str, str]: @@ -1191,7 +1211,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, if code: return 1, '', ('check-host failed:\n' + '\n'.join(err)) except ssh.HostConnectionError as e: - self.log.exception(f"check-host failed for '{host}' at addr ({e.addr}) due to connection failure: {str(e)}") + self.log.exception( + f"check-host failed for '{host}' at addr ({e.addr}) due to connection failure: {str(e)}") return 1, '', ('check-host failed:\n' + f"Failed to connect to {host} at address ({e.addr}): {str(e)}") except OrchestratorError: @@ -1472,6 +1493,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, )).strip() elif daemon_type == 'prometheus': image = self.container_image_prometheus + elif daemon_type == 'nvmeof': + image = self.container_image_nvmeof elif daemon_type == 'grafana': image = self.container_image_grafana elif daemon_type == 'alertmanager': @@ -1640,11 +1663,11 @@ Then run the following: # check, if there we're removing the last _admin host if not force: - p = PlacementSpec(label='_admin') + p = PlacementSpec(label=SpecialHostLabels.ADMIN) admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs()) if len(admin_hosts) == 1 and admin_hosts[0] == host: - raise OrchestratorValidationError(f"Host {host} is the last host with the '_admin'" - " label. Please add the '_admin' label to a host" + raise OrchestratorValidationError(f"Host {host} is the last host with the '{SpecialHostLabels.ADMIN}'" + f" label. 
Please add the '{SpecialHostLabels.ADMIN}' label to a host" " or add --force to this command") def run_cmd(cmd_args: dict) -> None: @@ -1662,7 +1685,8 @@ Then run the following: if d.daemon_type != 'osd': self.cephadm_services[daemon_type_to_service(str(d.daemon_type))].pre_remove(d) - self.cephadm_services[daemon_type_to_service(str(d.daemon_type))].post_remove(d, is_failed_deploy=False) + self.cephadm_services[daemon_type_to_service( + str(d.daemon_type))].post_remove(d, is_failed_deploy=False) else: cmd_args = { 'prefix': 'osd purge-actual', @@ -1680,7 +1704,8 @@ Then run the following: self.inventory.rm_host(host) self.cache.rm_host(host) self.ssh.reset_con(host) - self.offline_hosts_remove(host) # if host was in offline host list, we should remove it now. + # if host was in offline host list, we should remove it now. + self.offline_hosts_remove(host) self.event.set() # refresh stray health check self.log.info('Removed host %s' % host) return "Removed {} host '{}'".format('offline' if offline else '', host) @@ -1729,20 +1754,24 @@ Then run the following: def remove_host_label(self, host: str, label: str, force: bool = False) -> str: # if we remove the _admin label from the only host that has it we could end up # removing the only instance of the config and keyring and cause issues - if not force and label == '_admin': - p = PlacementSpec(label='_admin') + if not force and label == SpecialHostLabels.ADMIN: + p = PlacementSpec(label=SpecialHostLabels.ADMIN) admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs()) if len(admin_hosts) == 1 and admin_hosts[0] == host: - raise OrchestratorValidationError(f"Host {host} is the last host with the '_admin'" - " label.\nRemoving the _admin label from this host could cause the removal" + raise OrchestratorValidationError(f"Host {host} is the last host with the '{SpecialHostLabels.ADMIN}'" + f" label.\nRemoving the {SpecialHostLabels.ADMIN} label from this host could cause the removal" " of the last cluster config/keyring managed by cephadm.\n" - "It is recommended to add the _admin label to another host" + f"It is recommended to add the {SpecialHostLabels.ADMIN} label to another host" " before completing this operation.\nIf you're certain this is" " what you want rerun this command with --force.") - self.inventory.rm_label(host, label) - self.log.info('Removed label %s to host %s' % (label, host)) + if self.inventory.has_label(host, label): + self.inventory.rm_label(host, label) + msg = f'Removed label {label} from host {host}' + else: + msg = f"Host {host} does not have label '{label}'. Please use 'ceph orch host ls' to list all the labels." + self.log.info(msg) self._kick_serve_loop() - return 'Removed label %s from host %s' % (label, host) + return msg def _host_ok_to_stop(self, hostname: str, force: bool = False) -> Tuple[int, str]: self.log.debug("running host-ok-to-stop checks") @@ -2604,6 +2633,9 @@ Then run the following: daemon_names.append(dd.name()) return daemon_names + alertmanager_user, alertmanager_password = self._get_alertmanager_credentials() + prometheus_user, prometheus_password = self._get_prometheus_credentials() + deps = [] if daemon_type == 'haproxy': # because cephadm creates new daemon instances whenever @@ -2651,24 +2683,25 @@ Then run the following: # an explicit dependency is added for each service-type to force a reconfig # whenever the number of daemons for those service-type changes from 0 to greater # than zero and vice versa. 
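A related change in the hunks just below moves the Prometheus and Alertmanager web credentials out of module options and into the mgr key/value store (see _get_prometheus_credentials() and _get_alertmanager_credentials()); the stored values are hashed into the monitoring daemons' dependency list so a credential change triggers a reconfig. A minimal, hedged Python sketch of the get-or-initialize pattern, where the dict and key names are placeholders for the mgr store and the services' USER_CFG_KEY/PASS_CFG_KEY constants:

# Illustration only: the get-or-initialize pattern used by the new credential
# helpers. `store` stands in for the mgr get_store/set_store interface and the
# key names are placeholders.
from typing import Dict, Tuple

def get_or_init_credentials(store: Dict[str, str], user_key: str, pass_key: str) -> Tuple[str, str]:
    user = store.get(user_key)
    password = store.get(pass_key)
    if user is None or password is None:
        user, password = 'admin', 'admin'   # same defaults as the patch
        store[user_key] = user
        store[pass_key] = password
    return user, password

store: Dict[str, str] = {}
assert get_or_init_credentials(store, 'prometheus_user', 'prometheus_password') == ('admin', 'admin')
assert store['prometheus_user'] == 'admin'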
- deps += [s for s in ['node-exporter', 'alertmanager'] if self.cache.get_daemons_by_service(s)] + deps += [s for s in ['node-exporter', 'alertmanager'] + if self.cache.get_daemons_by_service(s)] if len(self.cache.get_daemons_by_type('ingress')) > 0: deps.append('ingress') # add dependency on ceph-exporter daemons deps += [d.name() for d in self.cache.get_daemons_by_service('ceph-exporter')] if self.secure_monitoring_stack: - if self.prometheus_web_user and self.prometheus_web_password: - deps.append(f'{hash(self.prometheus_web_user + self.prometheus_web_password)}') - if self.alertmanager_web_user and self.alertmanager_web_password: - deps.append(f'{hash(self.alertmanager_web_user + self.alertmanager_web_password)}') + if prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') + if alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') elif daemon_type == 'grafana': deps += get_daemon_names(['prometheus', 'loki']) - if self.secure_monitoring_stack and self.prometheus_web_user and self.prometheus_web_password: - deps.append(f'{hash(self.prometheus_web_user + self.prometheus_web_password)}') + if self.secure_monitoring_stack and prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') elif daemon_type == 'alertmanager': deps += get_daemon_names(['mgr', 'alertmanager', 'snmp-gateway']) - if self.secure_monitoring_stack and self.alertmanager_web_user and self.alertmanager_web_password: - deps.append(f'{hash(self.alertmanager_web_user + self.alertmanager_web_password)}') + if self.secure_monitoring_stack and alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') elif daemon_type == 'promtail': deps += get_daemon_names(['loki']) else: @@ -2769,16 +2802,50 @@ Then run the following: self.events.from_orch_error(e) raise + def _get_alertmanager_credentials(self) -> Tuple[str, str]: + user = self.get_store(AlertmanagerService.USER_CFG_KEY) + password = self.get_store(AlertmanagerService.PASS_CFG_KEY) + if user is None or password is None: + user = 'admin' + password = 'admin' + self.set_store(AlertmanagerService.USER_CFG_KEY, user) + self.set_store(AlertmanagerService.PASS_CFG_KEY, password) + return (user, password) + + def _get_prometheus_credentials(self) -> Tuple[str, str]: + user = self.get_store(PrometheusService.USER_CFG_KEY) + password = self.get_store(PrometheusService.PASS_CFG_KEY) + if user is None or password is None: + user = 'admin' + password = 'admin' + self.set_store(PrometheusService.USER_CFG_KEY, user) + self.set_store(PrometheusService.PASS_CFG_KEY, password) + return (user, password) + + @handle_orch_error + def set_prometheus_access_info(self, user: str, password: str) -> str: + self.set_store(PrometheusService.USER_CFG_KEY, user) + self.set_store(PrometheusService.PASS_CFG_KEY, password) + return 'prometheus credentials updated correctly' + + @handle_orch_error + def set_alertmanager_access_info(self, user: str, password: str) -> str: + self.set_store(AlertmanagerService.USER_CFG_KEY, user) + self.set_store(AlertmanagerService.PASS_CFG_KEY, password) + return 'alertmanager credentials updated correctly' + @handle_orch_error def get_prometheus_access_info(self) -> Dict[str, str]: - return {'user': self.prometheus_web_user or '', - 'password': self.prometheus_web_password or '', + user, password = self._get_prometheus_credentials() + return {'user': user, + 'password': 
password, 'certificate': self.http_server.service_discovery.ssl_certs.get_root_cert()} @handle_orch_error def get_alertmanager_access_info(self) -> Dict[str, str]: - return {'user': self.alertmanager_web_user or '', - 'password': self.alertmanager_web_password or '', + user, password = self._get_alertmanager_credentials() + return {'user': user, + 'password': password, 'certificate': self.http_server.service_discovery.ssl_certs.get_root_cert()} @handle_orch_error @@ -2799,7 +2866,6 @@ Then run the following: def _get_candidate_hosts(self, placement: PlacementSpec) -> List[str]: """Return a list of candidate hosts according to the placement specification.""" all_hosts = self.cache.get_schedulable_hosts() - draining_hosts = [dh.hostname for dh in self.cache.get_draining_hosts()] candidates = [] if placement.hosts: candidates = [h.hostname for h in placement.hosts if h.hostname in placement.hosts] @@ -2809,7 +2875,7 @@ Then run the following: candidates = [x for x in placement.filter_matching_hostspecs(all_hosts)] elif (placement.count is not None or placement.count_per_host is not None): candidates = [x.hostname for x in all_hosts] - return [h for h in candidates if h not in draining_hosts] + return [h for h in candidates if not self.cache.is_host_draining(h)] def _validate_one_shot_placement_spec(self, spec: PlacementSpec) -> None: """Validate placement specification for TunedProfileSpec and ClientKeyringSpec.""" @@ -2966,6 +3032,7 @@ Then run the following: 'rgw': PlacementSpec(count=2), 'ingress': PlacementSpec(count=2), 'iscsi': PlacementSpec(count=1), + 'nvmeof': PlacementSpec(count=1), 'rbd-mirror': PlacementSpec(count=2), 'cephfs-mirror': PlacementSpec(count=1), 'nfs': PlacementSpec(count=1), @@ -3189,7 +3256,8 @@ Then run the following: if self.inventory.get_host_with_state("maintenance"): raise OrchestratorError("Upgrade aborted - you have host(s) in maintenance state") if self.offline_hosts: - raise OrchestratorError(f"Upgrade aborted - Some host(s) are currently offline: {self.offline_hosts}") + raise OrchestratorError( + f"Upgrade aborted - Some host(s) are currently offline: {self.offline_hosts}") if daemon_types is not None and services is not None: raise OrchestratorError('--daemon-types and --services are mutually exclusive') if daemon_types is not None: @@ -3296,8 +3364,7 @@ Then run the following: return self.to_remove_osds.all_osds() @handle_orch_error - def drain_host(self, hostname, force=False): - # type: (str, bool) -> str + def drain_host(self, hostname: str, force: bool = False, keep_conf_keyring: bool = False, zap_osd_devices: bool = False) -> str: """ Drain all daemons from a host. 
:param host: host name @@ -3306,22 +3373,24 @@ Then run the following: # if we drain the last admin host we could end up removing the only instance # of the config and keyring and cause issues if not force: - p = PlacementSpec(label='_admin') + p = PlacementSpec(label=SpecialHostLabels.ADMIN) admin_hosts = p.filter_matching_hostspecs(self.inventory.all_specs()) if len(admin_hosts) == 1 and admin_hosts[0] == hostname: - raise OrchestratorValidationError(f"Host {hostname} is the last host with the '_admin'" + raise OrchestratorValidationError(f"Host {hostname} is the last host with the '{SpecialHostLabels.ADMIN}'" " label.\nDraining this host could cause the removal" " of the last cluster config/keyring managed by cephadm.\n" - "It is recommended to add the _admin label to another host" + f"It is recommended to add the {SpecialHostLabels.ADMIN} label to another host" " before completing this operation.\nIf you're certain this is" " what you want rerun this command with --force.") self.add_host_label(hostname, '_no_schedule') + if not keep_conf_keyring: + self.add_host_label(hostname, SpecialHostLabels.DRAIN_CONF_KEYRING) daemons: List[orchestrator.DaemonDescription] = self.cache.get_daemons_by_host(hostname) osds_to_remove = [d.daemon_id for d in daemons if d.daemon_type == 'osd'] - self.remove_osds(osds_to_remove) + self.remove_osds(osds_to_remove, zap=zap_osd_devices) daemons_table = "" daemons_table += "{:<20} {:<15}\n".format("type", "id") diff --git a/ceph/src/pybind/mgr/cephadm/schedule.py b/ceph/src/pybind/mgr/cephadm/schedule.py index 888a2a033..6666d761e 100644 --- a/ceph/src/pybind/mgr/cephadm/schedule.py +++ b/ceph/src/pybind/mgr/cephadm/schedule.py @@ -148,7 +148,7 @@ class HostAssignment(object): daemons: List[orchestrator.DaemonDescription], related_service_daemons: Optional[List[DaemonDescription]] = None, networks: Dict[str, Dict[str, Dict[str, List[str]]]] = {}, - filter_new_host: Optional[Callable[[str], bool]] = None, + filter_new_host: Optional[Callable[[str, ServiceSpec], bool]] = None, allow_colo: bool = False, primary_daemon_type: Optional[str] = None, per_host_daemon_type: Optional[str] = None, @@ -451,7 +451,7 @@ class HostAssignment(object): old = ls.copy() ls = [] for h in old: - if self.filter_new_host(h.hostname): + if self.filter_new_host(h.hostname, self.spec): ls.append(h) if len(old) > len(ls): logger.debug('Filtered %s down to %s' % (old, ls)) diff --git a/ceph/src/pybind/mgr/cephadm/serve.py b/ceph/src/pybind/mgr/cephadm/serve.py index 877a00cf7..5dfdc27a3 100644 --- a/ceph/src/pybind/mgr/cephadm/serve.py +++ b/ceph/src/pybind/mgr/cephadm/serve.py @@ -6,11 +6,19 @@ import uuid import os from collections import defaultdict from typing import TYPE_CHECKING, Optional, List, cast, Dict, Any, Union, Tuple, Set, \ - DefaultDict + DefaultDict, Callable from ceph.deployment import inventory from ceph.deployment.drive_group import DriveGroupSpec -from ceph.deployment.service_spec import ServiceSpec, CustomContainerSpec, PlacementSpec, RGWSpec +from ceph.deployment.service_spec import ( + ArgumentList, + ArgumentSpec, + CustomContainerSpec, + PlacementSpec, + RGWSpec, + ServiceSpec, + IngressSpec, +) from ceph.utils import datetime_now import orchestrator @@ -20,11 +28,12 @@ from cephadm.services.cephadmservice import CephadmDaemonDeploySpec from cephadm.schedule import HostAssignment from cephadm.autotune import MemoryAutotuner from cephadm.utils import forall_hosts, cephadmNoImage, is_repo_digest, \ - CephadmNoImage, CEPH_TYPES, ContainerInspectInfo + 
CephadmNoImage, CEPH_TYPES, ContainerInspectInfo, SpecialHostLabels from mgr_module import MonCommandFailed from mgr_util import format_bytes, verify_tls, get_cert_issuer_info, ServerConfigException from . import utils +from . import exchange if TYPE_CHECKING: from cephadm.module import CephadmOrchestrator @@ -244,7 +253,7 @@ class CephadmServe: if ( not self.mgr.use_agent - or host not in [h.hostname for h in self.mgr.cache.get_non_draining_hosts()] + or self.mgr.cache.is_host_draining(host) or host in agents_down ): if self.mgr.cache.host_needs_daemon_refresh(host): @@ -295,7 +304,7 @@ class CephadmServe: if ( self.mgr.cache.host_needs_autotune_memory(host) - and not self.mgr.inventory.has_label(host, '_no_autotune_memory') + and not self.mgr.inventory.has_label(host, SpecialHostLabels.NO_MEMORY_AUTOTUNE) ): self.log.debug(f"autotuning memory for {host}") self._autotune_host_memory(host) @@ -370,11 +379,14 @@ class CephadmServe: def _refresh_host_devices(self, host: str) -> Optional[str]: with_lsm = self.mgr.device_enhanced_scan + list_all = self.mgr.inventory_list_all inventory_args = ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'] if with_lsm: inventory_args.insert(-1, "--with-lsm") + if list_all: + inventory_args.insert(-1, "--list-all") try: try: @@ -687,8 +699,7 @@ class CephadmServe: public_networks = [x.strip() for x in out.split(',')] self.log.debug('mon public_network(s) is %s' % public_networks) - def matches_network(host): - # type: (str) -> bool + def matches_public_network(host: str, sspec: ServiceSpec) -> bool: # make sure the host has at least one network that belongs to some configured public network(s) for pn in public_networks: public_network = ipaddress.ip_network(pn) @@ -705,6 +716,40 @@ class CephadmServe: ) return False + def has_interface_for_vip(host: str, sspec: ServiceSpec) -> bool: + # make sure the host has an interface that can + # actually accomodate the VIP + if not sspec or sspec.service_type != 'ingress': + return True + ingress_spec = cast(IngressSpec, sspec) + virtual_ips = [] + if ingress_spec.virtual_ip: + virtual_ips.append(ingress_spec.virtual_ip) + elif ingress_spec.virtual_ips_list: + virtual_ips = ingress_spec.virtual_ips_list + for vip in virtual_ips: + found = False + bare_ip = str(vip).split('/')[0] + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet): + # found matching interface for this IP, move on + self.log.debug( + f'{bare_ip} is in {subnet} on {host} interface {list(ifaces.keys())[0]}' + ) + found = True + break + if not found: + self.log.info( + f"Filtered out host {host}: Host has no interface available for VIP: {vip}" + ) + return False + return True + + host_filters: Dict[str, Callable[[str, ServiceSpec], bool]] = { + 'mon': matches_public_network, + 'ingress': has_interface_for_vip + } + rank_map = None if svc.ranked(): rank_map = self.mgr.spec_store[spec.service_name()].rank_map or {} @@ -717,10 +762,7 @@ class CephadmServe: daemons=daemons, related_service_daemons=related_service_daemons, networks=self.mgr.cache.networks, - filter_new_host=( - matches_network if service_type == 'mon' - else None - ), + filter_new_host=host_filters.get(service_type, None), allow_colo=svc.allow_colo(), primary_daemon_type=svc.primary_daemon_type(spec), per_host_daemon_type=svc.per_host_daemon_type(spec), @@ -912,7 +954,18 @@ class CephadmServe: while daemons_to_remove and not _ok_to_stop(daemons_to_remove): # let's find a subset that is 
ok-to-stop - daemons_to_remove.pop() + non_error_daemon_index = -1 + # prioritize removing daemons in error state + for i, dmon in enumerate(daemons_to_remove): + if dmon.status != DaemonDescriptionStatus.error: + non_error_daemon_index = i + break + if non_error_daemon_index != -1: + daemons_to_remove.pop(non_error_daemon_index) + else: + # all daemons in list are in error state + # we should be able to remove all of them + break for d in daemons_to_remove: r = True assert d.hostname is not None @@ -933,8 +986,7 @@ class CephadmServe: self.mgr.spec_store.mark_configured(spec.service_name()) if self.mgr.use_agent: # can only send ack to agents if we know for sure port they bound to - hosts_altered = set([h for h in hosts_altered if (h in self.mgr.agent_cache.agent_ports and h in [ - h2.hostname for h2 in self.mgr.cache.get_non_draining_hosts()])]) + hosts_altered = set([h for h in hosts_altered if (h in self.mgr.agent_cache.agent_ports and not self.mgr.cache.is_host_draining(h))]) self.mgr.agent_helpers._request_agent_acks(hosts_altered, increment=True) if r is None: @@ -1115,9 +1167,9 @@ class CephadmServe: pspec = PlacementSpec.from_string(self.mgr.manage_etc_ceph_ceph_conf_hosts) ha = HostAssignment( spec=ServiceSpec('mon', placement=pspec), - hosts=self.mgr.cache.get_schedulable_hosts(), + hosts=self.mgr.cache.get_conf_keyring_available_hosts(), unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), - draining_hosts=self.mgr.cache.get_draining_hosts(), + draining_hosts=self.mgr.cache.get_conf_keyring_draining_hosts(), daemons=[], networks=self.mgr.cache.networks, ) @@ -1146,9 +1198,9 @@ class CephadmServe: keyring.encode('utf-8')).digest()) ha = HostAssignment( spec=ServiceSpec('mon', placement=ks.placement), - hosts=self.mgr.cache.get_schedulable_hosts(), + hosts=self.mgr.cache.get_conf_keyring_available_hosts(), unreachable_hosts=self.mgr.cache.get_unreachable_hosts(), - draining_hosts=self.mgr.cache.get_draining_hosts(), + draining_hosts=self.mgr.cache.get_conf_keyring_draining_hosts(), daemons=[], networks=self.mgr.cache.networks, ) @@ -1183,7 +1235,7 @@ class CephadmServe: client_files: Dict[str, Dict[str, Tuple[int, int, int, bytes, str]]], host: str) -> None: updated_files = False - if host in self.mgr.offline_hosts: + if self.mgr.cache.is_host_unreachable(host): return old_files = self.mgr.cache.get_host_client_files(host).copy() for path, m in client_files.get(host, {}).items(): @@ -1214,6 +1266,7 @@ class CephadmServe: osd_uuid_map: Optional[Dict[str, Any]] = None, ) -> str: + daemon_params: Dict[str, Any] = {} with set_exception_subject('service', orchestrator.DaemonDescription( daemon_type=daemon_spec.daemon_type, daemon_id=daemon_spec.daemon_id, @@ -1224,6 +1277,7 @@ class CephadmServe: image = '' start_time = datetime_now() ports: List[int] = daemon_spec.ports if daemon_spec.ports else [] + port_ips: Dict[str, str] = daemon_spec.port_ips if daemon_spec.port_ips else {} if daemon_spec.daemon_type == 'container': spec = cast(CustomContainerSpec, @@ -1234,9 +1288,10 @@ class CephadmServe: # TCP port to open in the host firewall if len(ports) > 0: - daemon_spec.extra_args.extend([ - '--tcp-ports', ' '.join(map(str, ports)) - ]) + daemon_params['tcp_ports'] = list(ports) + + if port_ips: + daemon_params['port_ips'] = port_ips # osd deployments needs an --osd-uuid arg if daemon_spec.daemon_type == 'osd': @@ -1245,14 +1300,14 @@ class CephadmServe: osd_uuid = osd_uuid_map.get(daemon_spec.daemon_id) if not osd_uuid: raise OrchestratorError('osd.%s not in osdmap' % 
daemon_spec.daemon_id) - daemon_spec.extra_args.extend(['--osd-fsid', osd_uuid]) + daemon_params['osd_fsid'] = osd_uuid if reconfig: - daemon_spec.extra_args.append('--reconfig') + daemon_params['reconfig'] = True if self.mgr.allow_ptrace: - daemon_spec.extra_args.append('--allow-ptrace') + daemon_params['allow_ptrace'] = True - daemon_spec, extra_container_args, extra_entrypoint_args = self._setup_extra_deployment_args(daemon_spec) + daemon_spec, extra_container_args, extra_entrypoint_args = self._setup_extra_deployment_args(daemon_spec, daemon_params) if daemon_spec.service_name in self.mgr.spec_store: configs = self.mgr.spec_store[daemon_spec.service_name].spec.custom_configs @@ -1268,23 +1323,31 @@ class CephadmServe: daemon_spec.name(), daemon_spec.host)) out, err, code = await self._run_cephadm( - daemon_spec.host, daemon_spec.name(), 'deploy', - [ - '--name', daemon_spec.name(), - '--meta-json', json.dumps({ - 'service_name': daemon_spec.service_name, - 'ports': daemon_spec.ports, - 'ip': daemon_spec.ip, - 'deployed_by': self.mgr.get_active_mgr_digests(), - 'rank': daemon_spec.rank, - 'rank_generation': daemon_spec.rank_generation, - 'extra_container_args': extra_container_args, - 'extra_entrypoint_args': extra_entrypoint_args - }), - '--config-json', '-', - ] + daemon_spec.extra_args, - stdin=json.dumps(daemon_spec.final_config), - image=image, + daemon_spec.host, + daemon_spec.name(), + ['_orch', 'deploy'], + [], + stdin=exchange.Deploy( + fsid=self.mgr._cluster_fsid, + name=daemon_spec.name(), + image=image, + params=daemon_params, + meta=exchange.DeployMeta( + service_name=daemon_spec.service_name, + ports=daemon_spec.ports, + ip=daemon_spec.ip, + deployed_by=self.mgr.get_active_mgr_digests(), + rank=daemon_spec.rank, + rank_generation=daemon_spec.rank_generation, + extra_container_args=ArgumentSpec.map_json( + extra_container_args, + ), + extra_entrypoint_args=ArgumentSpec.map_json( + extra_entrypoint_args, + ), + ), + config_blobs=daemon_spec.final_config, + ).dump_json_str(), ) if daemon_spec.daemon_type == 'agent': @@ -1329,35 +1392,33 @@ class CephadmServe: self.mgr.cephadm_services[servict_type].post_remove(dd, is_failed_deploy=True) raise - def _setup_extra_deployment_args(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[CephadmDaemonDeploySpec, Optional[List[str]], Optional[List[str]]]: + def _setup_extra_deployment_args( + self, + daemon_spec: CephadmDaemonDeploySpec, + params: Dict[str, Any], + ) -> Tuple[CephadmDaemonDeploySpec, Optional[ArgumentList], Optional[ArgumentList]]: # this function is for handling any potential user specified # (in the service spec) extra runtime or entrypoint args for a daemon # we are going to deploy. Effectively just adds a set of extra args to # pass to the cephadm binary to indicate the daemon being deployed # needs extra runtime/entrypoint args. 
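# --- Illustrative sketch (not part of the patch above): the deploy hunk earlier in this
# file replaces the long list of '--name/--meta-json/--config-json' CLI flags with a single
# JSON document written to `cephadm _orch deploy` on stdin. The Deploy/DeployMeta classes
# below are simplified stand-ins for the mgr's `exchange` module, shown only to make the
# shape of that payload concrete; they are not the real API.
import json
from dataclasses import dataclass, field, asdict
from typing import Any, Dict, List, Optional


@dataclass
class DeployMeta:
    service_name: str = ''
    ports: List[int] = field(default_factory=list)
    ip: Optional[str] = None


@dataclass
class Deploy:
    fsid: str
    name: str
    image: str = ''
    params: Dict[str, Any] = field(default_factory=dict)
    meta: DeployMeta = field(default_factory=DeployMeta)
    config_blobs: Dict[str, Any] = field(default_factory=dict)

    def dump_json_str(self) -> str:
        # one JSON document carries what used to be many separate CLI flags
        return json.dumps(asdict(self))


if __name__ == '__main__':
    # e.g. a reconfig of mon.test: flags such as '--reconfig' and '--tcp-ports'
    # become keys in 'params' instead of extra command-line arguments
    payload = Deploy(
        fsid='588f83ba-5995-11ee-9e94-52540057a206',
        name='mon.test',
        params={'reconfig': True, 'tcp_ports': [3300, 6789]},
        meta=DeployMeta(service_name='mon'),
        config_blobs={'config': '[mon]\nk=v\n', 'keyring': ''},
    )
    print(payload.dump_json_str())
# --- end of sketch ---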
Returns the modified daemon spec # as well as what args were added (as those are included in unit.meta file) + def _to_args(lst: ArgumentList) -> List[str]: + out: List[str] = [] + for argspec in lst: + out.extend(argspec.to_args()) + return out + try: eca = daemon_spec.extra_container_args if eca: - for a in eca: - # args with spaces need to be split into multiple args - # in order to work properly - args = a.split(' ') - for arg in args: - if arg: - daemon_spec.extra_args.append(f'--extra-container-args={arg}') + params['extra_container_args'] = _to_args(eca) except AttributeError: eca = None try: eea = daemon_spec.extra_entrypoint_args if eea: - for a in eea: - # args with spaces need to be split into multiple args - # in order to work properly - args = a.split(' ') - for arg in args: - if arg: - daemon_spec.extra_args.append(f'--extra-entrypoint-args={arg}') + params['extra_entrypoint_args'] = _to_args(eea) except AttributeError: eea = None return daemon_spec, eca, eea @@ -1431,7 +1492,7 @@ class CephadmServe: async def _run_cephadm(self, host: str, entity: Union[CephadmNoImage, str], - command: str, + command: Union[str, List[str]], args: List[str], addr: Optional[str] = "", stdin: Optional[str] = "", @@ -1496,7 +1557,10 @@ class CephadmServe: final_args += ['--timeout', str(timeout)] # subcommand - final_args.append(command) + if isinstance(command, list): + final_args.extend([str(v) for v in command]) + else: + final_args.append(command) # subcommand args if not no_fsid: diff --git a/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py b/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py index 5ab8810db..7d7a04dad 100644 --- a/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py +++ b/ceph/src/pybind/mgr/cephadm/services/cephadmservice.py @@ -10,7 +10,14 @@ from typing import TYPE_CHECKING, List, Callable, TypeVar, \ from mgr_module import HandleCommandResult, MonCommandFailed -from ceph.deployment.service_spec import ServiceSpec, RGWSpec, CephExporterSpec, MONSpec +from ceph.deployment.service_spec import ( + ArgumentList, + CephExporterSpec, + GeneralArgList, + MONSpec, + RGWSpec, + ServiceSpec, +) from ceph.deployment.utils import is_ipv6, unwrap_ipv6 from mgr_util import build_url, merge_dicts from orchestrator import OrchestratorError, DaemonDescription, DaemonDescriptionStatus @@ -32,7 +39,7 @@ def get_auth_entity(daemon_type: str, daemon_id: str, host: str = "") -> AuthEnt """ # despite this mapping entity names to daemons, self.TYPE within # the CephService class refers to service types, not daemon types - if daemon_type in ['rgw', 'rbd-mirror', 'cephfs-mirror', 'nfs', "iscsi", 'ingress', 'ceph-exporter']: + if daemon_type in ['rgw', 'rbd-mirror', 'cephfs-mirror', 'nfs', "iscsi", 'nvmeof', 'ingress', 'ceph-exporter']: return AuthEntity(f'client.{daemon_type}.{daemon_id}') elif daemon_type in ['crash', 'agent']: if host == "": @@ -59,10 +66,11 @@ class CephadmDaemonDeploySpec: daemon_type: Optional[str] = None, ip: Optional[str] = None, ports: Optional[List[int]] = None, + port_ips: Optional[Dict[str, str]] = None, rank: Optional[int] = None, rank_generation: Optional[int] = None, - extra_container_args: Optional[List[str]] = None, - extra_entrypoint_args: Optional[List[str]] = None, + extra_container_args: Optional[ArgumentList] = None, + extra_entrypoint_args: Optional[ArgumentList] = None, ): """ A data struction to encapsulate `cephadm deploy ... @@ -80,14 +88,21 @@ class CephadmDaemonDeploySpec: # for run_cephadm. 
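# --- Illustrative sketch (not part of the patch above): the `_to_args` helper flattens
# structured argument specs into plain CLI tokens, replacing the old string-splitting of
# '--extra-container-args=...'. `MiniArgumentSpec` is a simplified stand-in for
# ceph.deployment.service_spec.ArgumentSpec, used only to show the flattening behaviour.
import shlex
from dataclasses import dataclass
from typing import List


@dataclass
class MiniArgumentSpec:
    argument: str
    split: bool = False  # if True, the argument is broken into separate tokens

    def to_args(self) -> List[str]:
        return shlex.split(self.argument) if self.split else [self.argument]


def flatten_args(specs: List[MiniArgumentSpec]) -> List[str]:
    out: List[str] = []
    for spec in specs:
        out.extend(spec.to_args())
    return out


assert flatten_args(
    [MiniArgumentSpec('--cpus=2'), MiniArgumentSpec('--cpus 2', split=True)]
) == ['--cpus=2', '--cpus', '2']
# --- end of sketch ---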
self.keyring: Optional[str] = keyring + # FIXME: finish removing this # For run_cephadm. Would be great to have more expressive names. - self.extra_args: List[str] = extra_args or [] + # self.extra_args: List[str] = extra_args or [] + assert not extra_args self.ceph_conf = ceph_conf self.extra_files = extra_files or {} # TCP ports used by the daemon self.ports: List[int] = ports or [] + # mapping of ports to IP addresses for ports + # we know we will only bind to on a specific IP. + # Useful for allowing multiple daemons to bind + # to the same port on different IPs on the same node + self.port_ips: Dict[str, str] = port_ips or {} self.ip: Optional[str] = ip # values to be populated during generate_config calls @@ -144,10 +159,14 @@ class CephadmDaemonDeploySpec: ports=self.ports, rank=self.rank, rank_generation=self.rank_generation, - extra_container_args=self.extra_container_args, - extra_entrypoint_args=self.extra_entrypoint_args, + extra_container_args=cast(GeneralArgList, self.extra_container_args), + extra_entrypoint_args=cast(GeneralArgList, self.extra_entrypoint_args), ) + @property + def extra_args(self) -> List[str]: + return [] + class CephadmService(metaclass=ABCMeta): """ diff --git a/ceph/src/pybind/mgr/cephadm/services/ingress.py b/ceph/src/pybind/mgr/cephadm/services/ingress.py index 7523bc086..55be30454 100644 --- a/ceph/src/pybind/mgr/cephadm/services/ingress.py +++ b/ceph/src/pybind/mgr/cephadm/services/ingress.py @@ -165,6 +165,14 @@ class IngressService(CephService): ] host_ip = daemon_spec.ip or self.mgr.inventory.get_addr(daemon_spec.host) + server_opts = [] + if spec.enable_haproxy_protocol: + server_opts.append("send-proxy-v2") + logger.debug("enabled default server opts: %r", server_opts) + ip = '*' if spec.virtual_ips_list else str(spec.virtual_ip).split('/')[0] or daemon_spec.ip or '*' + frontend_port = daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port + if ip != '*' and frontend_port: + daemon_spec.port_ips = {str(frontend_port): ip} haproxy_conf = self.mgr.template.render( 'services/ingress/haproxy.cfg.j2', { @@ -174,10 +182,11 @@ class IngressService(CephService): 'servers': servers, 'user': spec.monitor_user or 'admin', 'password': password, - 'ip': "*" if spec.virtual_ips_list else str(spec.virtual_ip).split('/')[0] or daemon_spec.ip or '*', - 'frontend_port': daemon_spec.ports[0] if daemon_spec.ports else spec.frontend_port, + 'ip': ip, + 'frontend_port': frontend_port, 'monitor_port': daemon_spec.ports[1] if daemon_spec.ports else spec.monitor_port, - 'local_host_ip': host_ip + 'local_host_ip': host_ip, + 'default_server_opts': server_opts, } ) config_files = { @@ -242,38 +251,35 @@ class IngressService(CephService): host = daemon_spec.host hosts = sorted(list(set([host] + [str(d.hostname) for d in daemons]))) - # interface - bare_ips = [] - if spec.virtual_ip: - bare_ips.append(str(spec.virtual_ip).split('/')[0]) - elif spec.virtual_ips_list: - bare_ips = [str(vip).split('/')[0] for vip in spec.virtual_ips_list] - interface = None - for bare_ip in bare_ips: + def _get_valid_interface_and_ip(vip: str, host: str) -> Tuple[str, str]: + # interface + bare_ip = ipaddress.ip_interface(vip).ip + host_ip = '' + interface = None for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): if ifaces and ipaddress.ip_address(bare_ip) in ipaddress.ip_network(subnet): interface = list(ifaces.keys())[0] + host_ip = ifaces[interface][0] logger.info( f'{bare_ip} is in {subnet} on {host} interface {interface}' ) break - else: # nobreak - continue 
- break - # try to find interface by matching spec.virtual_interface_networks - if not interface and spec.virtual_interface_networks: - for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): - if subnet in spec.virtual_interface_networks: - interface = list(ifaces.keys())[0] - logger.info( - f'{spec.virtual_ip} will be configured on {host} interface ' - f'{interface} (which has guiding subnet {subnet})' - ) - break - if not interface: - raise OrchestratorError( - f"Unable to identify interface for {spec.virtual_ip} on {host}" - ) + # try to find interface by matching spec.virtual_interface_networks + if not interface and spec.virtual_interface_networks: + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if subnet in spec.virtual_interface_networks: + interface = list(ifaces.keys())[0] + host_ip = ifaces[interface][0] + logger.info( + f'{spec.virtual_ip} will be configured on {host} interface ' + f'{interface} (which is in subnet {subnet})' + ) + break + if not interface: + raise OrchestratorError( + f"Unable to identify interface for {spec.virtual_ip} on {host}" + ) + return interface, host_ip # script to monitor health script = '/usr/bin/false' @@ -318,7 +324,36 @@ class IngressService(CephService): # other_ips in conf file and converter to ips if host in hosts: hosts.remove(host) - other_ips = [utils.resolve_ip(self.mgr.inventory.get_addr(h)) for h in hosts] + host_ips: List[str] = [] + other_ips: List[List[str]] = [] + interfaces: List[str] = [] + for vip in virtual_ips: + interface, ip = _get_valid_interface_and_ip(vip, host) + host_ips.append(ip) + interfaces.append(interface) + ips: List[str] = [] + for h in hosts: + _, ip = _get_valid_interface_and_ip(vip, h) + ips.append(ip) + other_ips.append(ips) + + # Use interface as vrrp_interface for vrrp traffic if vrrp_interface_network not set on the spec + vrrp_interfaces: List[str] = [] + if not spec.vrrp_interface_network: + vrrp_interfaces = interfaces + else: + for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items(): + if subnet == spec.vrrp_interface_network: + vrrp_interface = [list(ifaces.keys())[0]] * len(interfaces) + logger.info( + f'vrrp will be configured on {host} interface ' + f'{vrrp_interface} (which is in subnet {subnet})' + ) + break + else: + raise OrchestratorError( + f"Unable to identify vrrp interface for {spec.vrrp_interface_network} on {host}" + ) keepalived_conf = self.mgr.template.render( 'services/ingress/keepalived.conf.j2', @@ -326,12 +361,14 @@ class IngressService(CephService): 'spec': spec, 'script': script, 'password': password, - 'interface': interface, + 'interfaces': interfaces, + 'vrrp_interfaces': vrrp_interfaces, 'virtual_ips': virtual_ips, + 'first_virtual_router_id': spec.first_virtual_router_id, 'states': states, 'priorities': priorities, 'other_ips': other_ips, - 'host_ip': utils.resolve_ip(self.mgr.inventory.get_addr(host)), + 'host_ips': host_ips, } ) diff --git a/ceph/src/pybind/mgr/cephadm/services/monitoring.py b/ceph/src/pybind/mgr/cephadm/services/monitoring.py index e0c0640ae..114c84860 100644 --- a/ceph/src/pybind/mgr/cephadm/services/monitoring.py +++ b/ceph/src/pybind/mgr/cephadm/services/monitoring.py @@ -29,9 +29,10 @@ class GrafanaService(CephadmService): def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]: assert self.TYPE == daemon_spec.daemon_type + prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials() deps = [] # type: List[str] - if 
self.mgr.secure_monitoring_stack and self.mgr.prometheus_web_user and self.mgr.prometheus_web_password: - deps.append(f'{hash(self.mgr.prometheus_web_user + self.mgr.prometheus_web_password)}') + if self.mgr.secure_monitoring_stack and prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}') prom_services = [] # type: List[str] @@ -58,8 +59,8 @@ class GrafanaService(CephadmService): oneline_root_cert = '\\n'.join([line.strip() for line in root_cert.splitlines()]) grafana_data_sources = self.mgr.template.render('services/grafana/ceph-dashboard.yml.j2', {'hosts': prom_services, - 'prometheus_user': self.mgr.prometheus_web_user, - 'prometheus_password': self.mgr.prometheus_web_password, + 'prometheus_user': prometheus_user, + 'prometheus_password': prometheus_password, 'cephadm_root_ca': oneline_root_cert, 'security_enabled': self.mgr.secure_monitoring_stack, 'loki_host': loki_host}) @@ -191,6 +192,8 @@ class GrafanaService(CephadmService): class AlertmanagerService(CephadmService): TYPE = 'alertmanager' DEFAULT_SERVICE_PORT = 9093 + USER_CFG_KEY = 'alertmanager/web_user' + PASS_CFG_KEY = 'alertmanager/web_password' def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: assert self.TYPE == daemon_spec.daemon_type @@ -279,15 +282,16 @@ class AlertmanagerService(CephadmService): deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}') if self.mgr.secure_monitoring_stack: - if self.mgr.alertmanager_web_user and self.mgr.alertmanager_web_password: - deps.append(f'{hash(self.mgr.alertmanager_web_user + self.mgr.alertmanager_web_password)}') + alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials() + if alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') node_ip = self.mgr.inventory.get_addr(daemon_spec.host) host_fqdn = self._inventory_get_fqdn(daemon_spec.host) cert, key = self.mgr.http_server.service_discovery.ssl_certs.generate_cert( host_fqdn, node_ip) context = { - 'alertmanager_web_user': self.mgr.alertmanager_web_user, - 'alertmanager_web_password': password_hash(self.mgr.alertmanager_web_password), + 'alertmanager_web_user': alertmanager_user, + 'alertmanager_web_password': password_hash(alertmanager_password), } return { "files": { @@ -343,6 +347,8 @@ class PrometheusService(CephadmService): TYPE = 'prometheus' DEFAULT_SERVICE_PORT = 9095 DEFAULT_MGR_PROMETHEUS_PORT = 9283 + USER_CFG_KEY = 'prometheus/web_user' + PASS_CFG_KEY = 'prometheus/web_password' def config(self, spec: ServiceSpec) -> None: # make sure module is enabled @@ -397,10 +403,13 @@ class PrometheusService(CephadmService): mgr_prometheus_sd_url = f'{srv_end_point}service=mgr-prometheus' # always included ceph_exporter_sd_url = f'{srv_end_point}service=ceph-exporter' # always included + alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials() + prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials() + # generate the prometheus configuration context = { - 'alertmanager_web_user': self.mgr.alertmanager_web_user, - 'alertmanager_web_password': self.mgr.alertmanager_web_password, + 'alertmanager_web_user': alertmanager_user, + 'alertmanager_web_password': alertmanager_password, 'secure_monitoring_stack': self.mgr.secure_monitoring_stack, 'service_discovery_username': self.mgr.http_server.service_discovery.username, 
'service_discovery_password': self.mgr.http_server.service_discovery.password, @@ -412,8 +421,8 @@ class PrometheusService(CephadmService): } web_context = { - 'prometheus_web_user': self.mgr.prometheus_web_user, - 'prometheus_web_password': password_hash(self.mgr.prometheus_web_password), + 'prometheus_web_user': prometheus_user, + 'prometheus_web_password': password_hash(prometheus_password), } if self.mgr.secure_monitoring_stack: @@ -482,10 +491,12 @@ class PrometheusService(CephadmService): # re-deploy prometheus if the mgr has changed (due to a fail-over i.e). deps.append(self.mgr.get_active_mgr().name()) if self.mgr.secure_monitoring_stack: - if self.mgr.prometheus_web_user and self.mgr.prometheus_web_password: - deps.append(f'{hash(self.mgr.prometheus_web_user + self.mgr.prometheus_web_password)}') - if self.mgr.alertmanager_web_user and self.mgr.alertmanager_web_password: - deps.append(f'{hash(self.mgr.alertmanager_web_user + self.mgr.alertmanager_web_password)}') + alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials() + prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials() + if prometheus_user and prometheus_password: + deps.append(f'{hash(prometheus_user + prometheus_password)}') + if alertmanager_user and alertmanager_password: + deps.append(f'{hash(alertmanager_user + alertmanager_password)}') deps.append(f'secure_monitoring_stack:{self.mgr.secure_monitoring_stack}') # add dependency on ceph-exporter daemons deps += [d.name() for d in self.mgr.cache.get_daemons_by_service('ceph-exporter')] diff --git a/ceph/src/pybind/mgr/cephadm/services/nfs.py b/ceph/src/pybind/mgr/cephadm/services/nfs.py index 0bd670377..f94a00f5b 100644 --- a/ceph/src/pybind/mgr/cephadm/services/nfs.py +++ b/ceph/src/pybind/mgr/cephadm/services/nfs.py @@ -1,4 +1,5 @@ import errno +import ipaddress import logging import os import subprocess @@ -92,9 +93,14 @@ class NFSService(CephService): # create the RGW keyring rgw_user = f'{rados_user}-rgw' rgw_keyring = self.create_rgw_keyring(daemon_spec) - bind_addr = spec.virtual_ip if spec.virtual_ip else (daemon_spec.ip if daemon_spec.ip else '') + if spec.virtual_ip: + bind_addr = spec.virtual_ip + else: + bind_addr = daemon_spec.ip if daemon_spec.ip else '' if not bind_addr: logger.warning(f'Bind address in {daemon_type}.{daemon_id}\'s ganesha conf is defaulting to empty') + else: + logger.debug("using haproxy bind address: %r", bind_addr) # generate the ganesha config def get_ganesha_conf() -> str: @@ -108,7 +114,11 @@ class NFSService(CephService): # fall back to default NFS port if not present in daemon_spec "port": daemon_spec.ports[0] if daemon_spec.ports else 2049, "bind_addr": bind_addr, + "haproxy_hosts": [], } + if spec.enable_haproxy_protocol: + context["haproxy_hosts"] = self._haproxy_hosts() + logger.debug("selected haproxy_hosts: %r", context["haproxy_hosts"]) return self.mgr.template.render('services/nfs/ganesha.conf.j2', context) # generate the cephadm config json @@ -291,3 +301,31 @@ class NFSService(CephService): stderr=subprocess.PIPE, timeout=10 ) + + def _haproxy_hosts(self) -> List[str]: + # NB: Ideally, we would limit the list to IPs on hosts running + # haproxy/ingress only, but due to the nature of cephadm today + # we'd "only know the set of haproxy hosts after they've been + # deployed" (quoth @adk7398). As it is today we limit the list + # of hosts we know are managed by cephadm. 
That ought to be + # good enough to prevent acceping haproxy protocol messages + # from "rouge" systems that are not under our control. At + # least until we learn otherwise. + cluster_ips: List[str] = [] + for host in self.mgr.inventory.keys(): + default_addr = self.mgr.inventory.get_addr(host) + cluster_ips.append(default_addr) + nets = self.mgr.cache.networks.get(host) + if not nets: + continue + for subnet, iface in nets.items(): + ip_subnet = ipaddress.ip_network(subnet) + if ipaddress.ip_address(default_addr) in ip_subnet: + continue # already present + if ip_subnet.is_loopback or ip_subnet.is_link_local: + continue # ignore special subnets + addrs: List[str] = sum((addr_list for addr_list in iface.values()), []) + if addrs: + # one address per interface/subnet is enough + cluster_ips.append(addrs[0]) + return cluster_ips diff --git a/ceph/src/pybind/mgr/cephadm/services/nvmeof.py b/ceph/src/pybind/mgr/cephadm/services/nvmeof.py new file mode 100644 index 000000000..7d2dd16cf --- /dev/null +++ b/ceph/src/pybind/mgr/cephadm/services/nvmeof.py @@ -0,0 +1,93 @@ +import errno +import logging +import json +from typing import List, cast, Optional + +from mgr_module import HandleCommandResult +from ceph.deployment.service_spec import NvmeofServiceSpec + +from orchestrator import DaemonDescription, DaemonDescriptionStatus +from .cephadmservice import CephadmDaemonDeploySpec, CephService +from .. import utils + +logger = logging.getLogger(__name__) + + +class NvmeofService(CephService): + TYPE = 'nvmeof' + + def config(self, spec: NvmeofServiceSpec) -> None: # type: ignore + assert self.TYPE == spec.service_type + assert spec.pool + self.mgr._check_pool_exists(spec.pool, spec.service_name()) + + def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec: + assert self.TYPE == daemon_spec.daemon_type + + spec = cast(NvmeofServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec) + nvmeof_gw_id = daemon_spec.daemon_id + host_ip = self.mgr.inventory.get_addr(daemon_spec.host) + + keyring = self.get_keyring_with_caps(self.get_auth_entity(nvmeof_gw_id), + ['mon', 'profile rbd', + 'osd', 'allow all tag rbd *=*']) + + # TODO: check if we can force jinja2 to generate dicts with double quotes instead of using json.dumps + transport_tcp_options = json.dumps(spec.transport_tcp_options) if spec.transport_tcp_options else None + name = '{}.{}'.format(utils.name_to_config_section('nvmeof'), nvmeof_gw_id) + rados_id = name[len('client.'):] if name.startswith('client.') else name + context = { + 'spec': spec, + 'name': name, + 'addr': host_ip, + 'port': spec.port, + 'log_level': 'WARN', + 'rpc_socket': '/var/tmp/spdk.sock', + 'transport_tcp_options': transport_tcp_options, + 'rados_id': rados_id + } + gw_conf = self.mgr.template.render('services/nvmeof/ceph-nvmeof.conf.j2', context) + + daemon_spec.keyring = keyring + daemon_spec.extra_files = {'ceph-nvmeof.conf': gw_conf} + daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec) + daemon_spec.deps = [] + return daemon_spec + + def config_dashboard(self, daemon_descrs: List[DaemonDescription]) -> None: + # TODO: what integration do we need with the dashboard? 
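# --- Illustrative sketch (not part of the patch above): the `_haproxy_hosts` helper in the
# NFS service builds the list of IPs ganesha should trust for the haproxy PROXY protocol by
# taking each host's default address plus one extra address per non-loopback, non-link-local
# subnet. A standalone version of that selection, using made-up inventory data:
import ipaddress
from typing import Dict, List

# hypothetical cache shape: {host: {subnet: {iface: [addrs]}}}
networks: Dict[str, Dict[str, Dict[str, List[str]]]] = {
    'host1': {
        '10.0.0.0/24': {'eth0': ['10.0.0.11']},
        '192.168.5.0/24': {'eth1': ['192.168.5.11']},
        '127.0.0.0/8': {'lo': ['127.0.0.1']},
    },
}
default_addrs: Dict[str, str] = {'host1': '10.0.0.11'}


def trusted_ips() -> List[str]:
    ips: List[str] = []
    for host, default_addr in default_addrs.items():
        ips.append(default_addr)
        for subnet, ifaces in networks.get(host, {}).items():
            net = ipaddress.ip_network(subnet)
            if ipaddress.ip_address(default_addr) in net:
                continue  # the default address already covers this subnet
            if net.is_loopback or net.is_link_local:
                continue  # never trust special subnets
            addrs = sum((a for a in ifaces.values()), [])
            if addrs:
                ips.append(addrs[0])  # one address per subnet is enough
    return ips


assert trusted_ips() == ['10.0.0.11', '192.168.5.11']
# --- end of sketch ---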
+ pass + + def ok_to_stop(self, + daemon_ids: List[str], + force: bool = False, + known: Optional[List[str]] = None) -> HandleCommandResult: + # if only 1 nvmeof, alert user (this is not passable with --force) + warn, warn_message = self._enough_daemons_to_stop(self.TYPE, daemon_ids, 'Nvmeof', 1, True) + if warn: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + # if reached here, there is > 1 nvmeof daemon. make sure none are down + warn_message = ('ALERT: 1 nvmeof daemon is already down. Please bring it back up before stopping this one') + nvmeof_daemons = self.mgr.cache.get_daemons_by_type(self.TYPE) + for i in nvmeof_daemons: + if i.status != DaemonDescriptionStatus.running: + return HandleCommandResult(-errno.EBUSY, '', warn_message) + + names = [f'{self.TYPE}.{d_id}' for d_id in daemon_ids] + warn_message = f'It is presumed safe to stop {names}' + return HandleCommandResult(0, warn_message, '') + + def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None: + """ + Called after the daemon is removed. + """ + logger.debug(f'Post remove daemon {self.TYPE}.{daemon.daemon_id}') + # TODO: remove config for dashboard nvmeof gateways if any + # and any certificates being used for mTLS + + def purge(self, service_name: str) -> None: + """Removes configuration + """ + # TODO: what should we purge in this case (if any)? + pass diff --git a/ceph/src/pybind/mgr/cephadm/services/osd.py b/ceph/src/pybind/mgr/cephadm/services/osd.py index c8c8ac1a9..bfecc5723 100644 --- a/ceph/src/pybind/mgr/cephadm/services/osd.py +++ b/ceph/src/pybind/mgr/cephadm/services/osd.py @@ -13,6 +13,7 @@ from ceph.utils import datetime_to_str, str_to_datetime from datetime import datetime import orchestrator from cephadm.serve import CephadmServe +from cephadm.utils import SpecialHostLabels from ceph.utils import datetime_now from orchestrator import OrchestratorError, DaemonDescription from mgr_module import MonCommandFailed @@ -42,7 +43,7 @@ class OSDService(CephService): host, drive_group)) return None # skip this host if we cannot schedule here - if self.mgr.inventory.has_label(host, '_no_schedule'): + if self.mgr.inventory.has_label(host, SpecialHostLabels.DRAIN_DAEMONS): return None osd_id_claims_for_host = osd_id_claims.filtered_by_host(host) diff --git a/ceph/src/pybind/mgr/cephadm/ssh.py b/ceph/src/pybind/mgr/cephadm/ssh.py index 560b8b826..d17cc0fcc 100644 --- a/ceph/src/pybind/mgr/cephadm/ssh.py +++ b/ceph/src/pybind/mgr/cephadm/ssh.py @@ -123,7 +123,7 @@ class SSHManager: except OSError as e: self.mgr.offline_hosts.add(host) log_content = log_string.getvalue() - msg = f"Can't communicate with remote host `{addr}`, possibly because python3 is not installed there or you are missing NOPASSWD in sudoers. {str(e)}" + msg = f"Can't communicate with remote host `{addr}`, possibly because the host is not reachable or python3 is not installed on the host. 
{str(e)}" logger.exception(msg) raise HostConnectionError(msg, host, addr) except asyncssh.Error as e: @@ -151,31 +151,44 @@ class SSHManager: async def _execute_command(self, host: str, - cmd: List[str], + cmd_components: List[str], stdin: Optional[str] = None, addr: Optional[str] = None, log_command: Optional[bool] = True, ) -> Tuple[str, str, int]: + conn = await self._remote_connection(host, addr) sudo_prefix = "sudo " if self.mgr.ssh_user != 'root' else "" - cmd = sudo_prefix + " ".join(quote(x) for x in cmd) + cmd = sudo_prefix + " ".join(quote(x) for x in cmd_components) + try: + address = addr or self.mgr.inventory.get_addr(host) + except Exception: + address = host if log_command: logger.debug(f'Running command: {cmd}') try: - r = await conn.run(f'{sudo_prefix}true', check=True, timeout=5) + r = await conn.run(f'{sudo_prefix}true', check=True, timeout=5) # host quick check r = await conn.run(cmd, input=stdin) - # handle these Exceptions otherwise you might get a weird error like TypeError: __init__() missing 1 required positional argument: 'reason' (due to the asyncssh error interacting with raise_if_exception) - except (asyncssh.ChannelOpenError, asyncssh.ProcessError, Exception) as e: + # handle these Exceptions otherwise you might get a weird error like + # TypeError: __init__() missing 1 required positional argument: 'reason' (due to the asyncssh error interacting with raise_if_exception) + except asyncssh.ChannelOpenError as e: # SSH connection closed or broken, will create new connection next call logger.debug(f'Connection to {host} failed. {str(e)}') await self._reset_con(host) self.mgr.offline_hosts.add(host) - if not addr: - try: - addr = self.mgr.inventory.get_addr(host) - except Exception: - addr = host - raise HostConnectionError(f'Unable to reach remote host {host}. {str(e)}', host, addr) + raise HostConnectionError(f'Unable to reach remote host {host}. {str(e)}', host, address) + except asyncssh.ProcessError as e: + msg = f"Cannot execute the command '{cmd}' on the {host}. {str(e.stderr)}." + logger.debug(msg) + await self._reset_con(host) + self.mgr.offline_hosts.add(host) + raise HostConnectionError(msg, host, address) + except Exception as e: + msg = f"Generic error while executing command '{cmd}' on the host {host}. {str(e)}." 
+ logger.debug(msg) + await self._reset_con(host) + self.mgr.offline_hosts.add(host) + raise HostConnectionError(msg, host, address) def _rstrip(v: Union[bytes, str, None]) -> str: if not v: @@ -318,18 +331,28 @@ class SSHManager: # identity ssh_key = self.mgr.get_store("ssh_identity_key") ssh_pub = self.mgr.get_store("ssh_identity_pub") + ssh_cert = self.mgr.get_store("ssh_identity_cert") self.mgr.ssh_pub = ssh_pub self.mgr.ssh_key = ssh_key - if ssh_key and ssh_pub: + self.mgr.ssh_cert = ssh_cert + if ssh_key: self.mgr.tkey = NamedTemporaryFile(prefix='cephadm-identity-') self.mgr.tkey.write(ssh_key.encode('utf-8')) os.fchmod(self.mgr.tkey.fileno(), 0o600) self.mgr.tkey.flush() # make visible to other processes - tpub = open(self.mgr.tkey.name + '.pub', 'w') - os.fchmod(tpub.fileno(), 0o600) - tpub.write(ssh_pub) - tpub.flush() # make visible to other processes - temp_files += [self.mgr.tkey, tpub] + temp_files += [self.mgr.tkey] + if ssh_pub: + tpub = open(self.mgr.tkey.name + '.pub', 'w') + os.fchmod(tpub.fileno(), 0o600) + tpub.write(ssh_pub) + tpub.flush() # make visible to other processes + temp_files += [tpub] + if ssh_cert: + tcert = open(self.mgr.tkey.name + '-cert.pub', 'w') + os.fchmod(tcert.fileno(), 0o600) + tcert.write(ssh_cert) + tcert.flush() # make visible to other processes + temp_files += [tcert] ssh_options += ['-i', self.mgr.tkey.name] self.mgr._temp_files = temp_files diff --git a/ceph/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 b/ceph/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 index a529798e7..100acce40 100644 --- a/ceph/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 +++ b/ceph/src/pybind/mgr/cephadm/templates/services/ingress/haproxy.cfg.j2 @@ -81,6 +81,9 @@ backend backend mode tcp balance source hash-type consistent +{% if default_server_opts %} + default-server {{ default_server_opts|join(" ") }} +{% endif %} {% for server in servers %} server {{ server.name }} {{ server.ip }}:{{ server.port }} {% endfor %} diff --git a/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 b/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 index f560c9756..e19f556c6 100644 --- a/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 +++ b/ceph/src/pybind/mgr/cephadm/templates/services/ingress/keepalived.conf.j2 @@ -11,21 +11,23 @@ vrrp_script check_backend { vrrp_instance VI_{{ x }} { state {{ states[x] }} priority {{ priorities[x] }} - interface {{ interface }} - virtual_router_id {{ 50 + x }} + interface {{ vrrp_interfaces[x] }} + virtual_router_id {{ first_virtual_router_id + x }} advert_int 1 authentication { auth_type PASS auth_pass {{ password }} } - unicast_src_ip {{ host_ip }} +{% if not spec.use_keepalived_multicast %} + unicast_src_ip {{ host_ips[x] }} unicast_peer { - {% for ip in other_ips %} + {% for ip in other_ips[x] %} {{ ip }} {% endfor %} } +{% endif %} virtual_ipaddress { - {{ virtual_ips[x] }} dev {{ interface }} + {{ virtual_ips[x] }} dev {{ interfaces[x] }} } track_script { check_backend diff --git a/ceph/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 b/ceph/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 index 9d6e15f1c..ab8df7192 100644 --- a/ceph/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 +++ b/ceph/src/pybind/mgr/cephadm/templates/services/nfs/ganesha.conf.j2 @@ -7,6 +7,9 @@ NFS_CORE_PARAM { {% if bind_addr %} Bind_addr = {{ bind_addr }}; {% endif %} +{% if haproxy_hosts %} + 
HAProxy_Hosts = {{ haproxy_hosts|join(", ") }}; +{% endif %} } NFSv4 { diff --git a/ceph/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 b/ceph/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 new file mode 100644 index 000000000..69b8332cd --- /dev/null +++ b/ceph/src/pybind/mgr/cephadm/templates/services/nvmeof/ceph-nvmeof.conf.j2 @@ -0,0 +1,34 @@ +# {{ cephadm_managed }} +[gateway] +name = {{ name }} +group = {{ spec.group }} +addr = {{ addr }} +port = {{ port }} +enable_auth = {{ spec.enable_auth }} +state_update_notify = True +state_update_interval_sec = 5 + +[ceph] +pool = {{ spec.pool }} +config_file = /etc/ceph/ceph.conf +id = {{ rados_id }} + +[mtls] +server_key = {{ spec.server_key }} +client_key = {{ spec.client_key }} +server_cert = {{ spec.server_cert }} +client_cert = {{ spec.client_cert }} + +[spdk] +tgt_path = {{ spec.tgt_path }} +rpc_socket = {{ rpc_socket }} +timeout = {{ spec.timeout }} +log_level = {{ log_level }} +conn_retries = {{ spec.conn_retries }} +transports = {{ spec.transports }} +{% if transport_tcp_options %} +transport_tcp_options = {{ transport_tcp_options }} +{% endif %} +{% if spec.tgt_cmd_extra_args %} +tgt_cmd_extra_args = {{ spec.tgt_cmd_extra_args }} +{% endif %} diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py b/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py index 364d4db69..24fcb0280 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_cephadm.py @@ -8,20 +8,29 @@ import pytest from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection from cephadm.serve import CephadmServe -from cephadm.inventory import HostCacheStatus +from cephadm.inventory import HostCacheStatus, ClientKeyringSpec from cephadm.services.osd import OSD, OSDRemovalQueue, OsdIdClaims +from cephadm.utils import SpecialHostLabels try: from typing import List except ImportError: pass -from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \ - NFSServiceSpec, IscsiServiceSpec, HostPlacementSpec, CustomContainerSpec, MDSSpec, \ - CustomConfig +from ceph.deployment.service_spec import ( + CustomConfig, + CustomContainerSpec, + HostPlacementSpec, + IscsiServiceSpec, + MDSSpec, + NFSServiceSpec, + PlacementSpec, + RGWSpec, + ServiceSpec, +) from ceph.deployment.drive_selection.selector import DriveSelection from ceph.deployment.inventory import Devices, Device -from ceph.utils import datetime_to_str, datetime_now +from ceph.utils import datetime_to_str, datetime_now, str_to_datetime from orchestrator import DaemonDescription, InventoryHost, \ HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent from tests import mock @@ -119,10 +128,7 @@ def with_osd_daemon(cephadm_module: CephadmOrchestrator, _run_cephadm, host: str assert _run_cephadm.mock_calls == [ mock.call(host, 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True), - mock.call(host, f'osd.{osd_id}', 'deploy', - ['--name', f'osd.{osd_id}', '--meta-json', mock.ANY, - '--config-json', '-', '--osd-fsid', 'uuid'], - stdin=mock.ANY, image=''), + mock.call(host, f'osd.{osd_id}', ['_orch', 'deploy'], [], stdin=mock.ANY), mock.call(host, 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True), ] @@ -452,16 +458,37 @@ class TestCephadm(object): CephadmServe(cephadm_module)._check_daemons() _run_cephadm.assert_called_with( - 'test', 'mon.test', 
'deploy', [ - '--name', 'mon.test', - '--meta-json', ('{"service_name": "mon", "ports": [], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--reconfig', - ], - stdin='{"config": "[mon]\\nk=v\\n[mon.test]\\npublic network = 127.0.0.0/8\\n", ' - + '"keyring": "", "files": {"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n"}}', - image='') + 'test', + 'mon.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "mon.test", + "image": '', + "deploy_arguments": [], + "params": { + 'reconfig': True, + }, + "meta": { + 'service_name': 'mon', + 'ports': [], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "[mon]\nk=v\n[mon.test]\npublic network = 127.0.0.0/8\n", + "keyring": "", + "files": { + "config": "[mon.test]\npublic network = 127.0.0.0/8\n" + }, + }, + }), + ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm") def test_mon_crush_location_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestrator): @@ -486,14 +513,35 @@ class TestCephadm(object): with with_service(cephadm_module, ServiceSpec(service_type='mon', crush_locations={'test': ['datacenter=a', 'rack=2']}), CephadmOrchestrator.apply_mon, 'test'): _run_cephadm.assert_called_with( - 'test', 'mon.test', 'deploy', [ - '--name', 'mon.test', - '--meta-json', '{"service_name": "mon", "ports": [], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}', - '--config-json', '-', - ], - stdin=('{"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n", "keyring": "", ' - '"files": {"config": "[mon.test]\\npublic network = 127.0.0.0/8\\n"}, "crush_location": "datacenter=a"}'), - image='', + 'test', + 'mon.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "mon.test", + "image": '', + "deploy_arguments": [], + "params": {}, + "meta": { + 'service_name': 'mon', + 'ports': [], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "[mon.test]\npublic network = 127.0.0.0/8\n", + "keyring": "", + "files": { + "config": "[mon.test]\npublic network = 127.0.0.0/8\n", + }, + "crush_location": "datacenter=a", + }, + }), ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm") @@ -502,16 +550,39 @@ class TestCephadm(object): with with_host(cephadm_module, 'test'): with with_service(cephadm_module, ServiceSpec(service_type='crash', extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_crash): _run_cephadm.assert_called_with( - 'test', 'crash.test', 'deploy', [ - '--name', 'crash.test', - '--meta-json', ('{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": ["--cpus=2", "--quiet"], "extra_entrypoint_args": null}'), - '--config-json', '-', - '--extra-container-args=--cpus=2', - '--extra-container-args=--quiet' - ], - stdin='{"config": "", "keyring": "[client.crash.test]\\nkey = None\\n"}', - image='', + 'test', + 'crash.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "crash.test", + "image": '', + "deploy_arguments": [], + "params": { + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + }, + 
"meta": { + 'service_name': 'crash', + 'ports': [], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "", + "keyring": "[client.crash.test]\nkey = None\n", + }, + }), ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm") @@ -522,19 +593,37 @@ class TestCephadm(object): extra_entrypoint_args=['--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', '--some-other-arg']), CephadmOrchestrator.apply_node_exporter): _run_cephadm.assert_called_with( - 'test', 'node-exporter.test', 'deploy', [ - '--name', 'node-exporter.test', - '--meta-json', ('{"service_name": "node-exporter", "ports": [9100], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": ' - '["--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", ' - '"--some-other-arg"]}'), - '--config-json', '-', - '--tcp-ports', '9100', - '--extra-entrypoint-args=--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', - '--extra-entrypoint-args=--some-other-arg' - ], - stdin='{}', - image='', + 'test', + 'node-exporter.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "node-exporter.test", + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9100], + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "meta": { + 'service_name': 'node-exporter', + 'ports': [9100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "config_blobs": {}, + }), ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm") @@ -546,21 +635,44 @@ class TestCephadm(object): extra_container_args=['--cpus=2', '--quiet']), CephadmOrchestrator.apply_node_exporter): _run_cephadm.assert_called_with( - 'test', 'node-exporter.test', 'deploy', [ - '--name', 'node-exporter.test', - '--meta-json', ('{"service_name": "node-exporter", "ports": [9100], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": ["--cpus=2", "--quiet"], "extra_entrypoint_args": ' - '["--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", ' - '"--some-other-arg"]}'), - '--config-json', '-', - '--tcp-ports', '9100', - '--extra-container-args=--cpus=2', - '--extra-container-args=--quiet', - '--extra-entrypoint-args=--collector.textfile.directory=/var/lib/node_exporter/textfile_collector', - '--extra-entrypoint-args=--some-other-arg' - ], - stdin='{}', - image='', + 'test', + 'node-exporter.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "node-exporter.test", + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9100], + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + 'extra_entrypoint_args': [ + "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "meta": { + 'service_name': 'node-exporter', + 'ports': [9100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': [ + "--cpus=2", + "--quiet", + ], + 'extra_entrypoint_args': [ + 
"--collector.textfile.directory=/var/lib/node_exporter/textfile_collector", + "--some-other-arg", + ], + }, + "config_blobs": {}, + }), ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm") @@ -572,24 +684,48 @@ class TestCephadm(object): extra_container_args=['--cpus 2', '--container-arg-with-value value']), CephadmOrchestrator.apply_node_exporter): _run_cephadm.assert_called_with( - 'test', 'node-exporter.test', 'deploy', [ - '--name', 'node-exporter.test', - '--meta-json', ('{"service_name": "node-exporter", "ports": [9100], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": ["--cpus 2", "--container-arg-with-value value"], ' - '"extra_entrypoint_args": ["--entrypoint-arg-with-value value", "--some-other-arg 3"]}'), - '--config-json', '-', - '--tcp-ports', '9100', - '--extra-container-args=--cpus', - '--extra-container-args=2', - '--extra-container-args=--container-arg-with-value', - '--extra-container-args=value', - '--extra-entrypoint-args=--entrypoint-arg-with-value', - '--extra-entrypoint-args=value', - '--extra-entrypoint-args=--some-other-arg', - '--extra-entrypoint-args=3' - ], - stdin='{}', - image='', + 'test', + 'node-exporter.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "node-exporter.test", + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9100], + 'extra_container_args': [ + "--cpus", + "2", + "--container-arg-with-value", + "value", + ], + 'extra_entrypoint_args': [ + "--entrypoint-arg-with-value", + "value", + "--some-other-arg", + "3", + ], + }, + "meta": { + 'service_name': 'node-exporter', + 'ports': [9100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': [ + "--cpus 2", + "--container-arg-with-value value", + ], + 'extra_entrypoint_args': [ + "--entrypoint-arg-with-value value", + "--some-other-arg 3", + ], + }, + "config_blobs": {}, + }), ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm") @@ -608,20 +744,45 @@ class TestCephadm(object): mount_path='/etc/test.conf'), CustomConfig(content='\n'.join(test_cert), mount_path='/usr/share/grafana/thing.crt') ] - conf_outs = [json.dumps(c.to_json()) for c in configs] - stdin_str = '{' + \ - f'"config": "", "keyring": "[client.crash.test]\\nkey = None\\n", "custom_config_files": [{conf_outs[0]}, {conf_outs[1]}]' + '}' + tc_joined = '\n'.join(test_cert) with with_host(cephadm_module, 'test'): with with_service(cephadm_module, ServiceSpec(service_type='crash', custom_configs=configs), CephadmOrchestrator.apply_crash): - _run_cephadm.assert_called_with( - 'test', 'crash.test', 'deploy', [ - '--name', 'crash.test', - '--meta-json', ('{"service_name": "crash", "ports": [], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - ], - stdin=stdin_str, - image='', + _run_cephadm( + 'test', + 'crash.test', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "crash.test", + "image": "", + "deploy_arguments": [], + "params": {}, + "meta": { + "service_name": "crash", + "ports": [], + "ip": None, + "deployed_by": [], + "rank": None, + "rank_generation": None, + "extra_container_args": None, + "extra_entrypoint_args": None, + }, + "config_blobs": { + "config": "", + "keyring": "[client.crash.test]\nkey = None\n", + "custom_config_files": [ + { + "content": "something something something", + "mount_path": "/etc/test.conf", + }, + { + 
"content": tc_joined, + "mount_path": "/usr/share/grafana/thing.crt", + }, + ] + } + }), ) @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) @@ -1695,15 +1856,11 @@ class TestCephadm(object): # being in offline/maint mode should disqualify hosts from being # candidates for scheduling - candidates = [ - h.hostname for h in cephadm_module.cache.get_schedulable_hosts()] - assert 'test2' in candidates - assert 'test3' in candidates + assert cephadm_module.cache.is_host_schedulable('test2') + assert cephadm_module.cache.is_host_schedulable('test3') - unreachable = [ - h.hostname for h in cephadm_module.cache.get_unreachable_hosts()] - assert 'test2' in unreachable - assert 'test3' in unreachable + assert cephadm_module.cache.is_host_unreachable('test2') + assert cephadm_module.cache.is_host_unreachable('test3') with with_service(cephadm_module, ServiceSpec('crash', placement=PlacementSpec(host_pattern='*'))): # re-apply services. No mgr should be removed from maint/offline hosts @@ -1873,6 +2030,34 @@ class TestCephadm(object): assert f1_before_digest != f1_after_digest assert f2_before_digest != f2_after_digest + @mock.patch("cephadm.inventory.HostCache.get_host_client_files") + def test_dont_write_client_files_to_unreachable_hosts(self, _get_client_files, cephadm_module): + cephadm_module.inventory.add_host(HostSpec('host1', '1.2.3.1')) # online + cephadm_module.inventory.add_host(HostSpec('host2', '1.2.3.2')) # maintenance + cephadm_module.inventory.add_host(HostSpec('host3', '1.2.3.3')) # offline + + # mark host2 as maintenance and host3 as offline + cephadm_module.inventory._inventory['host2']['status'] = 'maintenance' + cephadm_module.offline_hosts.add('host3') + + # verify host2 and host3 are correctly marked as unreachable but host1 is not + assert not cephadm_module.cache.is_host_unreachable('host1') + assert cephadm_module.cache.is_host_unreachable('host2') + assert cephadm_module.cache.is_host_unreachable('host3') + + _get_client_files.side_effect = Exception('Called _get_client_files') + + # with the online host, should call _get_client_files which + # we have setup to raise an Exception + with pytest.raises(Exception, match='Called _get_client_files'): + CephadmServe(cephadm_module)._write_client_files({}, 'host1') + + # for the maintenance and offline host, _get_client_files should + # not be called and it should just return immediately with nothing + # having been raised + CephadmServe(cephadm_module)._write_client_files({}, 'host2') + CephadmServe(cephadm_module)._write_client_files({}, 'host3') + def test_etc_ceph_init(self): with with_cephadm_module({'manage_etc_ceph_ceph_conf': True}) as m: assert m.manage_etc_ceph_ceph_conf is True @@ -1919,6 +2104,35 @@ osd_k2 = osd_v2 assert cephadm_module.get_minimal_ceph_conf() == expected_combined_conf + def test_client_keyrings_special_host_labels(self, cephadm_module): + cephadm_module.inventory.add_host(HostSpec('host1', labels=['keyring1'])) + cephadm_module.inventory.add_host(HostSpec('host2', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS])) + cephadm_module.inventory.add_host(HostSpec('host3', labels=['keyring1', SpecialHostLabels.DRAIN_DAEMONS, SpecialHostLabels.DRAIN_CONF_KEYRING])) + # hosts need to be marked as having had refresh to be available for placement + # so "refresh" with empty daemon list + cephadm_module.cache.update_host_daemons('host1', {}) + cephadm_module.cache.update_host_daemons('host2', {}) + cephadm_module.cache.update_host_daemons('host3', {}) + + assert 'host1' in [h.hostname 
for h in cephadm_module.cache.get_conf_keyring_available_hosts()] + assert 'host2' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()] + assert 'host3' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_available_hosts()] + + assert 'host1' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()] + assert 'host2' not in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()] + assert 'host3' in [h.hostname for h in cephadm_module.cache.get_conf_keyring_draining_hosts()] + + cephadm_module.keys.update(ClientKeyringSpec('keyring1', PlacementSpec(label='keyring1'))) + + with mock.patch("cephadm.module.CephadmOrchestrator.mon_command") as _mon_cmd: + _mon_cmd.return_value = (0, 'real-keyring', '') + client_files = CephadmServe(cephadm_module)._calc_client_files() + assert 'host1' in client_files.keys() + assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host1'].keys() + assert 'host2' in client_files.keys() + assert '/etc/ceph/ceph.keyring1.keyring' in client_files['host2'].keys() + assert 'host3' not in client_files.keys() + @mock.patch("cephadm.serve.CephadmServe._run_cephadm") def test_registry_login(self, _run_cephadm, cephadm_module: CephadmOrchestrator): def check_registry_credentials(url, username, password): @@ -2058,7 +2272,7 @@ Traceback (most recent call last): if 'ceph-volume' in args: return (json.dumps(ceph_volume_lvm_list), '', 0) else: - assert 'deploy' in args + assert ['_orch', 'deploy'] in args raise OrchestratorError("let's fail somehow") _run_cephadm.side_effect = _r_c assert cephadm_module._osd_activate( @@ -2120,12 +2334,12 @@ Traceback (most recent call last): def test_host_rm_last_admin(self, cephadm_module: CephadmOrchestrator): with pytest.raises(OrchestratorError): with with_host(cephadm_module, 'test', refresh_hosts=False, rm_with_force=False): - cephadm_module.inventory.add_label('test', '_admin') + cephadm_module.inventory.add_label('test', SpecialHostLabels.ADMIN) pass assert False with with_host(cephadm_module, 'test1', refresh_hosts=False, rm_with_force=True): with with_host(cephadm_module, 'test2', refresh_hosts=False, rm_with_force=False): - cephadm_module.inventory.add_label('test2', '_admin') + cephadm_module.inventory.add_label('test2', SpecialHostLabels.ADMIN) @pytest.mark.parametrize("facts, settings, expected_value", [ @@ -2325,3 +2539,171 @@ Traceback (most recent call last): with pytest.raises(OrchestratorError, match=r'Command "very slow" timed out on host hostC \(non-default 999 second timeout\)'): with cephadm_module.async_timeout_handler('hostC', 'very slow', 999): cephadm_module.wait_async(_timeout()) + + @mock.patch("cephadm.CephadmOrchestrator.remove_osds") + @mock.patch("cephadm.CephadmOrchestrator.add_host_label", lambda *a, **kw: None) + @mock.patch("cephadm.inventory.HostCache.get_daemons_by_host", lambda *a, **kw: []) + def test_host_drain_zap(self, _rm_osds, cephadm_module): + # pass force=true in these tests to bypass _admin label check + cephadm_module.drain_host('host1', force=True, zap_osd_devices=False) + assert _rm_osds.called_with([], zap=False) + + cephadm_module.drain_host('host1', force=True, zap_osd_devices=True) + assert _rm_osds.called_with([], zap=True) + + def test_process_ls_output(self, cephadm_module): + sample_ls_output = """[ + { + "style": "cephadm:v1", + "name": "mon.vm-00", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mon.vm-00", + "enabled": true, + 
"state": "running", + "service_name": "mon", + "ports": [], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "memory_request": null, + "memory_limit": null, + "container_id": "b170b964a6e2918955362eb36195627c6086d3f859d4ebce2ee13f3ee4738733", + "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3", + "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55", + "container_image_digests": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "memory_usage": 56214159, + "cpu_percentage": "2.32%", + "version": "18.0.0-5185-g7b3a4f2b", + "started": "2023-09-22T22:31:11.752300Z", + "created": "2023-09-22T22:15:24.121387Z", + "deployed": "2023-09-22T22:31:10.383431Z", + "configured": "2023-09-22T22:31:11.859440Z" + }, + { + "style": "cephadm:v1", + "name": "mgr.vm-00.mpexeg", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@mgr.vm-00.mpexeg", + "enabled": true, + "state": "running", + "service_name": "mgr", + "ports": [ + 8443, + 9283, + 8765 + ], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "memory_request": null, + "memory_limit": null, + "container_id": "6e7756cef553a25a2a84227e8755d3d25046b9cd8758b23c698d34b3af895242", + "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3", + "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55", + "container_image_digests": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "memory_usage": 529740595, + "cpu_percentage": "8.35%", + "version": "18.0.0-5185-g7b3a4f2b", + "started": "2023-09-22T22:30:18.587021Z", + "created": "2023-09-22T22:15:29.101409Z", + "deployed": "2023-09-22T22:30:17.339114Z", + "configured": "2023-09-22T22:30:18.758122Z" + }, + { + "style": "cephadm:v1", + "name": "agent.vm-00", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@agent.vm-00", + "enabled": true, + "state": "running", + "service_name": "agent", + "ports": [], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + "rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "container_id": null, + "container_image_name": null, + "container_image_id": null, + "container_image_digests": null, + "version": null, + "started": null, + "created": "2023-09-22T22:33:34.708289Z", + "deployed": null, + "configured": "2023-09-22T22:33:34.722289Z" + }, + { + "style": "cephadm:v1", + "name": "osd.0", + "fsid": "588f83ba-5995-11ee-9e94-52540057a206", + "systemd_unit": "ceph-588f83ba-5995-11ee-9e94-52540057a206@osd.0", + "enabled": true, + "state": "running", + "service_name": "osd.foo", + "ports": [], + "ip": null, + "deployed_by": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "rank": null, + 
"rank_generation": null, + "extra_container_args": null, + "extra_entrypoint_args": null, + "memory_request": null, + "memory_limit": null, + "container_id": "93f71c60820b86901a45b3b1fe3dba3e3e677b37fd22310b7e7da3f67bb8ccd6", + "container_image_name": "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3", + "container_image_id": "674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55", + "container_image_digests": [ + "quay.io/adk3798/ceph@sha256:ff374767a4568f6d11a941ab763e7732cd7e071362328f7b6a7891bc4852a3a3" + ], + "memory_usage": 73410805, + "cpu_percentage": "6.54%", + "version": "18.0.0-5185-g7b3a4f2b", + "started": "2023-09-22T22:41:29.019587Z", + "created": "2023-09-22T22:41:03.615080Z", + "deployed": "2023-09-22T22:41:24.965222Z", + "configured": "2023-09-22T22:41:29.119250Z" + } +]""" + + now = str_to_datetime('2023-09-22T22:45:29.119250Z') + cephadm_module._cluster_fsid = '588f83ba-5995-11ee-9e94-52540057a206' + with mock.patch("cephadm.module.datetime_now", lambda: now): + cephadm_module._process_ls_output('vm-00', json.loads(sample_ls_output)) + assert 'vm-00' in cephadm_module.cache.daemons + assert 'mon.vm-00' in cephadm_module.cache.daemons['vm-00'] + assert 'mgr.vm-00.mpexeg' in cephadm_module.cache.daemons['vm-00'] + assert 'agent.vm-00' in cephadm_module.cache.daemons['vm-00'] + assert 'osd.0' in cephadm_module.cache.daemons['vm-00'] + + daemons = cephadm_module.cache.get_daemons_by_host('vm-00') + c_img_ids = [dd.container_image_id for dd in daemons if dd.daemon_type != 'agent'] + assert all(c_img_id == '674eb38037f1555bb7884ede5db47f1749486e7f12ecb416e34ada87c9934e55' for c_img_id in c_img_ids) + last_refreshes = [dd.last_refresh for dd in daemons] + assert all(lrf == now for lrf in last_refreshes) + versions = [dd.version for dd in daemons if dd.daemon_type != 'agent'] + assert all(version == '18.0.0-5185-g7b3a4f2b' for version in versions) + + osd = cephadm_module.cache.get_daemons_by_type('osd', 'vm-00')[0] + assert osd.cpu_percentage == '6.54%' + assert osd.memory_usage == 73410805 + assert osd.created == str_to_datetime('2023-09-22T22:41:03.615080Z') diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_services.py b/ceph/src/pybind/mgr/cephadm/tests/test_services.py index 192b08fb9..2300b288d 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_services.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_services.py @@ -13,13 +13,14 @@ from cephadm.services.cephadmservice import MonService, MgrService, MdsService, RbdMirrorService, CrashService, CephadmDaemonDeploySpec from cephadm.services.iscsi import IscsiService from cephadm.services.nfs import NFSService +from cephadm.services.nvmeof import NvmeofService from cephadm.services.osd import OSDService from cephadm.services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \ NodeExporterService, LokiService, PromtailService from cephadm.module import CephadmOrchestrator from ceph.deployment.service_spec import IscsiServiceSpec, MonitoringSpec, AlertManagerSpec, \ ServiceSpec, RGWSpec, GrafanaSpec, SNMPGatewaySpec, IngressSpec, PlacementSpec, TracingSpec, \ - PrometheusSpec, CephExporterSpec, NFSServiceSpec + PrometheusSpec, CephExporterSpec, NFSServiceSpec, NvmeofServiceSpec from cephadm.tests.fixtures import with_host, with_service, _run_cephadm, async_side_effect from ceph.utils import datetime_now @@ -105,6 +106,7 @@ class TestCephadmService: promtail_service = PromtailService(mgr) crash_service = CrashService(mgr) iscsi_service = 
IscsiService(mgr) + nvmeof_service = NvmeofService(mgr) cephadm_services = { 'mon': mon_service, 'mgr': mgr_service, @@ -121,6 +123,7 @@ class TestCephadmService: 'promtail': promtail_service, 'crash': crash_service, 'iscsi': iscsi_service, + 'nvmeof': nvmeof_service, } return cephadm_services @@ -299,18 +302,144 @@ log_to_file = False""" _run_cephadm.assert_called_with( 'test', f'iscsi.{iscsi_daemon_id}', - 'deploy', - [ - '--name', f'iscsi.{iscsi_daemon_id}', - '--meta-json', f'{"{"}"service_name": "iscsi.{pool}", "ports": [{api_port}], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null{"}"}', - '--config-json', '-', '--tcp-ports', '3456' - ], - stdin=json.dumps({"config": "", "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n", "files": {"iscsi-gateway.cfg": iscsi_gateway_conf}}), - image='') + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": f'iscsi.{iscsi_daemon_id}', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [api_port], + }, + "meta": { + 'service_name': f'iscsi.{pool}', + 'ports': [api_port], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "config": "", + "keyring": f"[client.iscsi.{iscsi_daemon_id}]\nkey = None\n", + "files": { + "iscsi-gateway.cfg": iscsi_gateway_conf, + }, + } + }), + ) + + +class TestNVMEOFService: + + mgr = FakeMgr() + nvmeof_service = NvmeofService(mgr) + + nvmeof_spec = NvmeofServiceSpec(service_type='nvmeof', service_id="a") + nvmeof_spec.daemon_type = 'nvmeof' + nvmeof_spec.daemon_id = "a" + nvmeof_spec.spec = MagicMock() + nvmeof_spec.spec.daemon_type = 'nvmeof' + + mgr.spec_store = MagicMock() + mgr.spec_store.all_specs.get.return_value = nvmeof_spec + + def test_nvmeof_client_caps(self): + pass + + @patch('cephadm.utils.resolve_ip') + def test_nvmeof_dashboard_config(self, mock_resolve_ip): + pass + + @patch("cephadm.inventory.Inventory.get_addr", lambda _, __: '192.168.100.100') + @patch("cephadm.serve.CephadmServe._run_cephadm") + @patch("cephadm.module.CephadmOrchestrator.get_unique_name") + def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrchestrator): + + nvmeof_daemon_id = 'testpool.test.qwert' + pool = 'testpool' + tgt_cmd_extra_args = '--cpumask=0xFF --msg-mempool-size=524288' + default_port = 5500 + group = 'mygroup' + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + _get_name.return_value = nvmeof_daemon_id + + nvmeof_gateway_conf = f"""# This file is generated by cephadm. 
+[gateway] +name = client.nvmeof.{nvmeof_daemon_id} +group = {group} +addr = 192.168.100.100 +port = {default_port} +enable_auth = False +state_update_notify = True +state_update_interval_sec = 5 + +[ceph] +pool = {pool} +config_file = /etc/ceph/ceph.conf +id = nvmeof.{nvmeof_daemon_id} + +[mtls] +server_key = ./server.key +client_key = ./client.key +server_cert = ./server.crt +client_cert = ./client.crt + +[spdk] +tgt_path = /usr/local/bin/nvmf_tgt +rpc_socket = /var/tmp/spdk.sock +timeout = 60 +log_level = WARN +conn_retries = 10 +transports = tcp +transport_tcp_options = {{"in_capsule_data_size": 8192, "max_io_qpairs_per_ctrlr": 7}} +tgt_cmd_extra_args = {tgt_cmd_extra_args}\n""" + + with with_host(cephadm_module, 'test'): + with with_service(cephadm_module, NvmeofServiceSpec(service_id=pool, + tgt_cmd_extra_args=tgt_cmd_extra_args, + group=group, + pool=pool)): + _run_cephadm.assert_called_with( + 'test', + f'nvmeof.{nvmeof_daemon_id}', + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": "nvmeof.testpool.test.qwert", + "image": "", + "deploy_arguments": [], + "params": { + "tcp_ports": [5500, 4420, 8009] + }, + "meta": { + "service_name": "nvmeof.testpool", + "ports": [5500, 4420, 8009], + "ip": None, + "deployed_by": [], + "rank": None, + "rank_generation": None, + "extra_container_args": None, + "extra_entrypoint_args": None + }, + "config_blobs": { + "config": "", + "keyring": "[client.nvmeof.testpool.test.qwert]\nkey = None\n", + "files": { + "ceph-nvmeof.conf": nvmeof_gateway_conf + } + } + }), + ) class TestMonitoring: def _get_config(self, url: str) -> str: + return f""" # This file is generated by cephadm. # See https://prometheus.io/docs/alerting/configuration/ for documentation. @@ -386,30 +515,41 @@ class TestMonitoring: with with_service(cephadm_module, AlertManagerSpec()): y = dedent(self._get_config(expected_yaml_url)).lstrip() _run_cephadm.assert_called_with( - "test", + 'test', "alertmanager.test", - "deploy", - [ - "--name", - "alertmanager.test", - "--meta-json", - ('{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - "--config-json", - "-", - "--tcp-ports", - "9093 9094", - ], - stdin=json.dumps( - {"files": {"alertmanager.yml": y}, "peers": []} - ), - image="", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'alertmanager.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9093, 9094], + }, + "meta": { + 'service_name': 'alertmanager', + 'ports': [9093, 9094], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "alertmanager.yml": y, + }, + "peers": [], + } + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @patch("socket.getfqdn") @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1') - @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password') + @patch("cephadm.services.monitoring.password_hash", lambda password: 'alertmanager_password_hash') def test_alertmanager_config_security_enabled(self, _get_fqdn, _run_cephadm, cephadm_module: CephadmOrchestrator): _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) @@ -424,8 +564,8 @@ class TestMonitoring: with with_host(cephadm_module, 'test'): cephadm_module.secure_monitoring_stack = True - 
cephadm_module.alertmanager_web_password = 'fake_password' - cephadm_module.alertmanager_web_user = 'admin' + cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user') + cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password') cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock(side_effect=gen_cert) cephadm_module.http_server.service_discovery.ssl_certs.get_root_cert = MagicMock(side_effect=get_root_cert) with with_service(cephadm_module, AlertManagerSpec()): @@ -462,29 +602,44 @@ class TestMonitoring: cert_file: alertmanager.crt key_file: alertmanager.key basic_auth_users: - admin: fake_password""").lstrip() + alertmanager_user: alertmanager_password_hash""").lstrip() _run_cephadm.assert_called_with( 'test', - 'alertmanager.test', - 'deploy', - [ - '--name', 'alertmanager.test', - '--meta-json', '{"service_name": "alertmanager", "ports": [9093, 9094], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}', - '--config-json', '-', '--tcp-ports', '9093 9094' - ], + "alertmanager.test", + ['_orch', 'deploy'], + [], stdin=json.dumps({ - "files": { - "alertmanager.yml": y, - 'alertmanager.crt': 'mycert', - 'alertmanager.key': 'mykey', - 'web.yml': web_config, - 'root_cert.pem': 'my_root_cert' + "fsid": "fsid", + "name": 'alertmanager.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9093, 9094], + }, + "meta": { + 'service_name': 'alertmanager', + 'ports': [9093, 9094], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, }, - 'peers': [], - 'web_config': '/etc/alertmanager/web.yml' + "config_blobs": { + "files": { + "alertmanager.yml": y, + 'alertmanager.crt': 'mycert', + 'alertmanager.key': 'mykey', + 'web.yml': web_config, + 'root_cert.pem': 'my_root_cert' + }, + 'peers': [], + 'web_config': '/etc/alertmanager/web.yml', + } }), - image='') + ) @patch("cephadm.serve.CephadmServe._run_cephadm") @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1') @@ -492,6 +647,12 @@ class TestMonitoring: _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast') with with_host(cephadm_module, 'test'): + # host "test" needs to have networks for keepalive to be placed + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + }) with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \ with_service(cephadm_module, CephExporterSpec('ceph-exporter')) as _, \ with_service(cephadm_module, s) as _, \ @@ -542,25 +703,41 @@ class TestMonitoring: _run_cephadm.assert_called_with( 'test', - 'prometheus.test', - 'deploy', - [ - '--name', 'prometheus.test', - '--meta-json', - ('{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9095' - ], - stdin=json.dumps({"files": {"prometheus.yml": y, - "/etc/prometheus/alerting/custom_alerts.yml": ""}, - 'retention_time': '15d', - 'retention_size': '0'}), - image='') + "prometheus.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'prometheus.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9095], + }, + 
"meta": { + 'service_name': 'prometheus', + 'ports': [9095], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "prometheus.yml": y, + "/etc/prometheus/alerting/custom_alerts.yml": "", + }, + 'retention_time': '15d', + 'retention_size': '0', + }, + }), + ) @patch("cephadm.serve.CephadmServe._run_cephadm") @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '::1') - @patch("cephadm.services.monitoring.password_hash", lambda password: 'fake_password') + @patch("cephadm.services.monitoring.password_hash", lambda password: 'prometheus_password_hash') def test_prometheus_config_security_enabled(self, _run_cephadm, cephadm_module: CephadmOrchestrator): _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), rgw_frontend_type='beast') @@ -570,10 +747,20 @@ class TestMonitoring: with with_host(cephadm_module, 'test'): cephadm_module.secure_monitoring_stack = True - cephadm_module.http_server.service_discovery.username = 'admin' - cephadm_module.http_server.service_discovery.password = 'fake_password' + cephadm_module.set_store(PrometheusService.USER_CFG_KEY, 'prometheus_user') + cephadm_module.set_store(PrometheusService.PASS_CFG_KEY, 'prometheus_plain_password') + cephadm_module.set_store(AlertmanagerService.USER_CFG_KEY, 'alertmanager_user') + cephadm_module.set_store(AlertmanagerService.PASS_CFG_KEY, 'alertmanager_plain_password') + cephadm_module.http_server.service_discovery.username = 'sd_user' + cephadm_module.http_server.service_discovery.password = 'sd_password' cephadm_module.http_server.service_discovery.ssl_certs.generate_cert = MagicMock( side_effect=gen_cert) + # host "test" needs to have networks for keepalive to be placed + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + }) with with_service(cephadm_module, MonitoringSpec('node-exporter')) as _, \ with_service(cephadm_module, s) as _, \ with_service(cephadm_module, AlertManagerSpec('alertmanager')) as _, \ @@ -592,7 +779,7 @@ class TestMonitoring: cert_file: prometheus.crt key_file: prometheus.key basic_auth_users: - admin: fake_password""").lstrip() + prometheus_user: prometheus_password_hash""").lstrip() y = dedent(""" # This file is generated by cephadm. 
@@ -606,15 +793,15 @@ class TestMonitoring: alertmanagers: - scheme: https basic_auth: - username: admin - password: admin + username: alertmanager_user + password: alertmanager_plain_password tls_config: ca_file: root_cert.pem http_sd_configs: - url: https://[::1]:8765/sd/prometheus/sd-config?service=alertmanager basic_auth: - username: admin - password: fake_password + username: sd_user + password: sd_password tls_config: ca_file: root_cert.pem @@ -627,8 +814,8 @@ class TestMonitoring: http_sd_configs: - url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus basic_auth: - username: admin - password: fake_password + username: sd_user + password: sd_password tls_config: ca_file: root_cert.pem @@ -639,8 +826,8 @@ class TestMonitoring: http_sd_configs: - url: https://[::1]:8765/sd/prometheus/sd-config?service=node-exporter basic_auth: - username: admin - password: fake_password + username: sd_user + password: sd_password tls_config: ca_file: root_cert.pem @@ -651,8 +838,8 @@ class TestMonitoring: http_sd_configs: - url: https://[::1]:8765/sd/prometheus/sd-config?service=haproxy basic_auth: - username: admin - password: fake_password + username: sd_user + password: sd_password tls_config: ca_file: root_cert.pem @@ -664,37 +851,50 @@ class TestMonitoring: http_sd_configs: - url: https://[::1]:8765/sd/prometheus/sd-config?service=ceph-exporter basic_auth: - username: admin - password: fake_password + username: sd_user + password: sd_password tls_config: ca_file: root_cert.pem """).lstrip() _run_cephadm.assert_called_with( 'test', - 'prometheus.test', - 'deploy', - [ - '--name', 'prometheus.test', - '--meta-json', - '{"service_name": "prometheus", "ports": [9095], "ip": null, "deployed_by": [], "rank": null, "rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}', - '--config-json', '-', - '--tcp-ports', '9095' - ], + "prometheus.test", + ['_orch', 'deploy'], + [], stdin=json.dumps({ - 'files': { - 'prometheus.yml': y, - 'root_cert.pem': '', - 'mgr_prometheus_cert.pem': '', - 'web.yml': web_config, - 'prometheus.crt': 'mycert', - 'prometheus.key': 'mykey', - "/etc/prometheus/alerting/custom_alerts.yml": "", + "fsid": "fsid", + "name": 'prometheus.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9095], + }, + "meta": { + 'service_name': 'prometheus', + 'ports': [9095], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, }, - 'retention_time': '15d', - 'retention_size': '0', - 'web_config': '/etc/prometheus/web.yml'}), - image='' + "config_blobs": { + 'files': { + 'prometheus.yml': y, + 'root_cert.pem': '', + 'mgr_prometheus_cert.pem': '', + 'web.yml': web_config, + 'prometheus.crt': 'mycert', + 'prometheus.key': 'mykey', + "/etc/prometheus/alerting/custom_alerts.yml": "", + }, + 'retention_time': '15d', + 'retention_size': '0', + 'web_config': '/etc/prometheus/web.yml', + }, + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -736,18 +936,34 @@ class TestMonitoring: _run_cephadm.assert_called_with( 'test', - 'loki.test', - 'deploy', - [ - '--name', 'loki.test', - '--meta-json', - ('{"service_name": "loki", "ports": [3100], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '3100' - ], - stdin=json.dumps({"files": {"loki.yml": y}}), - image='') + "loki.test", + ['_orch', 'deploy'], + [], + 
stdin=json.dumps({ + "fsid": "fsid", + "name": 'loki.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [3100], + }, + "meta": { + 'service_name': 'loki', + 'ports': [3100], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "loki.yml": y + }, + }, + }), + ) @patch("cephadm.serve.CephadmServe._run_cephadm") def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator): @@ -778,18 +994,34 @@ class TestMonitoring: _run_cephadm.assert_called_with( 'test', - 'promtail.test', - 'deploy', - [ - '--name', 'promtail.test', - '--meta-json', - ('{"service_name": "promtail", "ports": [9080], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9080' - ], - stdin=json.dumps({"files": {"promtail.yml": y}}), - image='') + "promtail.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'promtail.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9080], + }, + "meta": { + 'service_name': 'promtail', + 'ports': [9080], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": { + "promtail.yml": y + }, + }, + }), + ) @patch("cephadm.serve.CephadmServe._run_cephadm") @patch("cephadm.module.CephadmOrchestrator.get_mgr_ip", lambda _: '1::4') @@ -861,16 +1093,32 @@ class TestMonitoring: _run_cephadm.assert_called_with( 'test', - 'grafana.test', - 'deploy', - [ - '--name', 'grafana.test', - '--meta-json', - ('{"service_name": "grafana", "ports": [3000], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', '--tcp-ports', '3000'], - stdin=json.dumps({"files": files}), - image='') + "grafana.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'grafana.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [3000], + }, + "meta": { + 'service_name': 'grafana', + 'ports': [3000], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": { + "files": files, + }, + }), + ) @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) def test_grafana_initial_admin_pw(self, cephadm_module: CephadmOrchestrator): @@ -991,16 +1239,32 @@ spec: CephadmServe(cephadm_module)._check_daemons() _run_cephadm.assert_called_with( - 'test', 'alertmanager.test', 'deploy', [ - '--name', 'alertmanager.test', - '--meta-json', ('{"service_name": "alertmanager", "ports": [4200, 9094], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '4200 9094', - '--reconfig' - ], - stdin='{}', - image='') + 'test', + "alertmanager.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'alertmanager.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [4200, 9094], + 'reconfig': True, + }, + "meta": { + 'service_name': 'alertmanager', + 'ports': [4200, 9094], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 
'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": {}, + }), + ) class TestRGWService: @@ -1083,18 +1347,29 @@ class TestSNMPGateway: with with_service(cephadm_module, spec): _run_cephadm.assert_called_with( 'test', - 'snmp-gateway.test', - 'deploy', - [ - '--name', 'snmp-gateway.test', - '--meta-json', - ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9464' - ], - stdin=json.dumps(config), - image='' + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9464], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9464], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -1119,18 +1394,29 @@ class TestSNMPGateway: with with_service(cephadm_module, spec): _run_cephadm.assert_called_with( 'test', - 'snmp-gateway.test', - 'deploy', - [ - '--name', 'snmp-gateway.test', - '--meta-json', - ('{"service_name": "snmp-gateway", "ports": [9465], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9465' - ], - stdin=json.dumps(config), - image='' + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9465], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9465], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -1159,18 +1445,29 @@ class TestSNMPGateway: with with_service(cephadm_module, spec): _run_cephadm.assert_called_with( 'test', - 'snmp-gateway.test', - 'deploy', - [ - '--name', 'snmp-gateway.test', - '--meta-json', - ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9464' - ], - stdin=json.dumps(config), - image='' + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9464], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9464], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -1204,28 +1501,50 @@ class TestSNMPGateway: with with_service(cephadm_module, spec): _run_cephadm.assert_called_with( 'test', - 'snmp-gateway.test', - 'deploy', - [ - '--name', 'snmp-gateway.test', - '--meta-json', - ('{"service_name": "snmp-gateway", "ports": [9464], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, 
"extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9464' - ], - stdin=json.dumps(config), - image='' + "snmp-gateway.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'snmp-gateway.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9464], + }, + "meta": { + 'service_name': 'snmp-gateway', + 'ports': [9464], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), ) class TestIngressService: + @pytest.mark.parametrize( + "enable_haproxy_protocol", + [False, True], + ) @patch("cephadm.inventory.Inventory.get_addr") @patch("cephadm.utils.resolve_ip") @patch("cephadm.inventory.HostCache.get_daemons_by_service") @patch("cephadm.serve.CephadmServe._run_cephadm") - def test_ingress_config_nfs_multiple_nfs_same_rank(self, _run_cephadm, _get_daemons_by_service, _resolve_ip, _get_addr, cephadm_module: CephadmOrchestrator): + def test_ingress_config_nfs_multiple_nfs_same_rank( + self, + _run_cephadm, + _get_daemons_by_service, + _resolve_ip, _get_addr, + cephadm_module: CephadmOrchestrator, + enable_haproxy_protocol: bool, + ): _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) def fake_resolve_ip(hostname: str) -> str: @@ -1241,18 +1560,27 @@ class TestIngressService: return hostname _get_addr.side_effect = fake_get_addr - nfs_service = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1, hosts=['host1', 'host2']), - port=12049) - - ispec = IngressSpec(service_type='ingress', - service_id='nfs.foo', - backend_service='nfs.foo', - frontend_port=2049, - monitor_port=9049, - virtual_ip='192.168.122.100/24', - monitor_user='admin', - monitor_password='12345', - keepalived_password='12345') + nfs_service = NFSServiceSpec( + service_id="foo", + placement=PlacementSpec( + count=1, + hosts=['host1', 'host2']), + port=12049, + enable_haproxy_protocol=enable_haproxy_protocol, + ) + + ispec = IngressSpec( + service_type='ingress', + service_id='nfs.foo', + backend_service='nfs.foo', + frontend_port=2049, + monitor_port=9049, + virtual_ip='192.168.122.100/24', + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + enable_haproxy_protocol=enable_haproxy_protocol, + ) cephadm_module.spec_store._specs = { 'nfs.foo': nfs_service, @@ -1267,46 +1595,47 @@ class TestIngressService: # for the host1 nfs daemon as we'll end up giving that # one higher rank_generation but the same rank as the one # on host2 + haproxy_txt = ( + '# This file is generated by cephadm.\n' + 'global\n' + ' log 127.0.0.1 local2\n' + ' chroot /var/lib/haproxy\n' + ' pidfile /var/lib/haproxy/haproxy.pid\n' + ' maxconn 8000\n' + ' daemon\n' + ' stats socket /var/lib/haproxy/stats\n\n' + 'defaults\n' + ' mode tcp\n' + ' log global\n' + ' timeout queue 1m\n' + ' timeout connect 10s\n' + ' timeout client 1m\n' + ' timeout server 1m\n' + ' timeout check 10s\n' + ' maxconn 8000\n\n' + 'frontend stats\n' + ' mode http\n' + ' bind 192.168.122.100:9049\n' + ' bind host1:9049\n' + ' stats enable\n' + ' stats uri /stats\n' + ' stats refresh 10s\n' + ' stats auth admin:12345\n' + ' http-request use-service prometheus-exporter if { path /metrics }\n' + ' monitor-uri /health\n\n' + 'frontend frontend\n' + ' bind 192.168.122.100:2049\n' + ' default_backend backend\n\n' + 'backend backend\n' + ' mode tcp\n' + ' balance source\n' + ' hash-type consistent\n' + ) + if enable_haproxy_protocol: + haproxy_txt += 
' default-server send-proxy-v2\n' + haproxy_txt += ' server nfs.foo.0 192.168.122.111:12049\n' haproxy_expected_conf = { - 'files': - { - 'haproxy.cfg': - '# This file is generated by cephadm.\n' - 'global\n' - ' log 127.0.0.1 local2\n' - ' chroot /var/lib/haproxy\n' - ' pidfile /var/lib/haproxy/haproxy.pid\n' - ' maxconn 8000\n' - ' daemon\n' - ' stats socket /var/lib/haproxy/stats\n\n' - 'defaults\n' - ' mode tcp\n' - ' log global\n' - ' timeout queue 1m\n' - ' timeout connect 10s\n' - ' timeout client 1m\n' - ' timeout server 1m\n' - ' timeout check 10s\n' - ' maxconn 8000\n\n' - 'frontend stats\n' - ' mode http\n' - ' bind 192.168.122.100:9049\n' - ' bind host1:9049\n' - ' stats enable\n' - ' stats uri /stats\n' - ' stats refresh 10s\n' - ' stats auth admin:12345\n' - ' http-request use-service prometheus-exporter if { path /metrics }\n' - ' monitor-uri /health\n\n' - 'frontend frontend\n' - ' bind 192.168.122.100:2049\n' - ' default_backend backend\n\n' - 'backend backend\n' - ' mode tcp\n' - ' balance source\n' - ' hash-type consistent\n' - ' server nfs.foo.0 192.168.122.111:12049\n' - } + 'files': {'haproxy.cfg': haproxy_txt} } # verify we get the same cfg regardless of the order in which the nfs daemons are returned @@ -1344,7 +1673,7 @@ class TestIngressService: with with_host(cephadm_module, 'test', addr='1.2.3.7'): cephadm_module.cache.update_host_networks('test', { '1.2.3.0/24': { - 'if0': ['1.2.3.4/32'] + 'if0': ['1.2.3.4'] } }) @@ -1388,7 +1717,7 @@ class TestIngressService: 'auth_type PASS\n ' 'auth_pass 12345\n ' '}\n ' - 'unicast_src_ip 1.2.3.7\n ' + 'unicast_src_ip 1.2.3.4\n ' 'unicast_peer {\n ' '}\n ' 'virtual_ipaddress {\n ' @@ -1467,7 +1796,7 @@ class TestIngressService: with with_host(cephadm_module, 'test'): cephadm_module.cache.update_host_networks('test', { '1.2.3.0/24': { - 'if0': ['1.2.3.4/32'] + 'if0': ['1.2.3.1'] } }) @@ -1511,7 +1840,7 @@ class TestIngressService: 'auth_type PASS\n ' 'auth_pass 12345\n ' '}\n ' - 'unicast_src_ip 1::4\n ' + 'unicast_src_ip 1.2.3.1\n ' 'unicast_peer {\n ' '}\n ' 'virtual_ipaddress {\n ' @@ -1592,7 +1921,7 @@ class TestIngressService: with with_host(cephadm_module, 'test', addr='1.2.3.7'): cephadm_module.cache.update_host_networks('test', { '1.2.3.0/24': { - 'if0': ['1.2.3.4/32'] + 'if0': ['1.2.3.1'] } }) @@ -1637,7 +1966,7 @@ class TestIngressService: 'auth_type PASS\n ' 'auth_pass 12345\n ' '}\n ' - 'unicast_src_ip 1.2.3.7\n ' + 'unicast_src_ip 1.2.3.1\n ' 'unicast_peer {\n ' '}\n ' 'virtual_ipaddress {\n ' @@ -1709,6 +2038,201 @@ class TestIngressService: assert haproxy_generated_conf[0] == haproxy_expected_conf + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_haproxy_port_ips(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.7'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.4/32'] + } + }) + + # Check the ingress with multiple VIPs + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ip = '1.2.3.100' + frontend_port = 8089 + + ispec = IngressSpec(service_type='ingress', + service_id='test', + backend_service='rgw.foo', + frontend_port=frontend_port, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ip=f"{ip}/24") + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # generate the haproxy conf based on 
the specified spec + haproxy_daemon_spec = cephadm_module.cephadm_services['ingress'].prepare_create( + CephadmDaemonDeploySpec( + host='test', + daemon_type='haproxy', + daemon_id='ingress', + service_name=ispec.service_name())) + + assert haproxy_daemon_spec.port_ips == {str(frontend_port): ip} + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_keepalive_config_multi_interface_vips(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.1'): + with with_host(cephadm_module, 'test2', addr='1.2.3.2'): + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.1'] + } + }) + cephadm_module.cache.update_host_networks('test2', { + '1.2.3.0/24': { + 'if0': ['1.2.3.2'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.2'] + } + }) + + # Check the ingress with multiple VIPs + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + placement=PlacementSpec(hosts=['test', 'test2']), + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"]) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + keepalived_generated_conf = cephadm_module.cephadm_services['ingress'].keepalived_generate_config( + CephadmDaemonDeploySpec(host='test', daemon_id='ingress', service_name=ispec.service_name())) + + keepalived_expected_conf = { + 'files': + { + 'keepalived.conf': + '# This file is generated by cephadm.\n' + 'vrrp_script check_backend {\n ' + 'script "/usr/bin/curl http://1.2.3.1:8999/health"\n ' + 'weight -20\n ' + 'interval 2\n ' + 'rise 2\n ' + 'fall 2\n}\n\n' + 'vrrp_instance VI_0 {\n ' + 'state MASTER\n ' + 'priority 100\n ' + 'interface if0\n ' + 'virtual_router_id 50\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 1.2.3.1\n ' + 'unicast_peer {\n ' + '1.2.3.2\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '1.2.3.100/24 dev if0\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + 'vrrp_instance VI_1 {\n ' + 'state BACKUP\n ' + 'priority 90\n ' + 'interface if1\n ' + 'virtual_router_id 51\n ' + 'advert_int 1\n ' + 'authentication {\n ' + 'auth_type PASS\n ' + 'auth_pass 12345\n ' + '}\n ' + 'unicast_src_ip 100.100.100.1\n ' + 'unicast_peer {\n ' + '100.100.100.2\n ' + '}\n ' + 'virtual_ipaddress {\n ' + '100.100.100.100/24 dev if1\n ' + '}\n ' + 'track_script {\n ' + 'check_backend\n }\n' + '}\n' + } + } + + # check keepalived config + assert keepalived_generated_conf[0] == keepalived_expected_conf + + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_keepalive_interface_host_filtering(self, _run_cephadm, cephadm_module: CephadmOrchestrator): + # we need to make sure keepalive daemons will have an interface + # on the hosts we deploy them on in order to set up their VIP. 
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + with with_host(cephadm_module, 'test', addr='1.2.3.1'): + with with_host(cephadm_module, 'test2', addr='1.2.3.2'): + with with_host(cephadm_module, 'test3', addr='1.2.3.3'): + with with_host(cephadm_module, 'test4', addr='1.2.3.3'): + # setup "test" and "test4" to have all the necessary interfaces, + # "test2" to have one of them (should still be filtered) + # and "test3" to have none of them + cephadm_module.cache.update_host_networks('test', { + '1.2.3.0/24': { + 'if0': ['1.2.3.1'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.1'] + } + }) + cephadm_module.cache.update_host_networks('test2', { + '1.2.3.0/24': { + 'if0': ['1.2.3.2'] + }, + }) + cephadm_module.cache.update_host_networks('test4', { + '1.2.3.0/24': { + 'if0': ['1.2.3.4'] + }, + '100.100.100.0/24': { + 'if1': ['100.100.100.4'] + } + }) + + s = RGWSpec(service_id="foo", placement=PlacementSpec(count=1), + rgw_frontend_type='beast') + + ispec = IngressSpec(service_type='ingress', + service_id='test', + placement=PlacementSpec(hosts=['test', 'test2', 'test3', 'test4']), + backend_service='rgw.foo', + frontend_port=8089, + monitor_port=8999, + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + virtual_ips_list=["1.2.3.100/24", "100.100.100.100/24"]) + with with_service(cephadm_module, s) as _, with_service(cephadm_module, ispec) as _: + # since we're never actually going to refresh the host here, + # check the tmp daemons to see what was placed during the apply + daemons = cephadm_module.cache._get_tmp_daemons() + keepalive_daemons = [d for d in daemons if d.daemon_type == 'keepalived'] + hosts_deployed_on = [d.hostname for d in keepalive_daemons] + assert 'test' in hosts_deployed_on + assert 'test2' not in hosts_deployed_on + assert 'test3' not in hosts_deployed_on + assert 'test4' in hosts_deployed_on + @patch("cephadm.serve.CephadmServe._run_cephadm") @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock()) @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock()) @@ -1720,7 +2244,7 @@ class TestIngressService: with with_host(cephadm_module, 'test', addr='1.2.3.7'): cephadm_module.cache.update_host_networks('test', { '1.2.3.0/24': { - 'if0': ['1.2.3.4/32'] + 'if0': ['1.2.3.1'] } }) @@ -1767,7 +2291,7 @@ class TestIngressService: 'auth_type PASS\n ' 'auth_pass 12345\n ' '}\n ' - 'unicast_src_ip 1.2.3.7\n ' + 'unicast_src_ip 1.2.3.1\n ' 'unicast_peer {\n ' '}\n ' 'virtual_ipaddress {\n ' @@ -1782,6 +2306,240 @@ class TestIngressService: # check keepalived config assert keepalived_generated_conf[0] == keepalived_expected_conf + @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock()) + @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock()) + @patch("cephadm.services.nfs.NFSService.purge", MagicMock()) + @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock()) + @patch("cephadm.inventory.Inventory.keys") + @patch("cephadm.inventory.Inventory.get_addr") + @patch("cephadm.utils.resolve_ip") + @patch("cephadm.inventory.HostCache.get_daemons_by_service") + @patch("cephadm.serve.CephadmServe._run_cephadm") + def test_ingress_config_nfs_proxy_protocol( + self, + _run_cephadm, + _get_daemons_by_service, + _resolve_ip, + _get_addr, + _inventory_keys, + cephadm_module: CephadmOrchestrator, + ): + """Verify that setting enable_haproxy_protocol for both ingress and + nfs services sets the desired configuration parameters in both + the haproxy config and nfs 
ganesha config. + """ + _run_cephadm.side_effect = async_side_effect(('{}', '', 0)) + + def fake_resolve_ip(hostname: str) -> str: + if hostname in ('host1', "192.168.122.111"): + return '192.168.122.111' + elif hostname in ('host2', '192.168.122.222'): + return '192.168.122.222' + else: + raise KeyError(hostname) + _resolve_ip.side_effect = fake_resolve_ip + _get_addr.side_effect = fake_resolve_ip + + def fake_keys(): + return ['host1', 'host2'] + _inventory_keys.side_effect = fake_keys + + nfs_service = NFSServiceSpec( + service_id="foo", + placement=PlacementSpec( + count=1, + hosts=['host1', 'host2']), + port=12049, + enable_haproxy_protocol=True, + ) + + ispec = IngressSpec( + service_type='ingress', + service_id='nfs.foo', + backend_service='nfs.foo', + frontend_port=2049, + monitor_port=9049, + virtual_ip='192.168.122.100/24', + monitor_user='admin', + monitor_password='12345', + keepalived_password='12345', + enable_haproxy_protocol=True, + ) + + cephadm_module.spec_store._specs = { + 'nfs.foo': nfs_service, + 'ingress.nfs.foo': ispec + } + cephadm_module.spec_store.spec_created = { + 'nfs.foo': datetime_now(), + 'ingress.nfs.foo': datetime_now() + } + + haproxy_txt = ( + '# This file is generated by cephadm.\n' + 'global\n' + ' log 127.0.0.1 local2\n' + ' chroot /var/lib/haproxy\n' + ' pidfile /var/lib/haproxy/haproxy.pid\n' + ' maxconn 8000\n' + ' daemon\n' + ' stats socket /var/lib/haproxy/stats\n\n' + 'defaults\n' + ' mode tcp\n' + ' log global\n' + ' timeout queue 1m\n' + ' timeout connect 10s\n' + ' timeout client 1m\n' + ' timeout server 1m\n' + ' timeout check 10s\n' + ' maxconn 8000\n\n' + 'frontend stats\n' + ' mode http\n' + ' bind 192.168.122.100:9049\n' + ' bind 192.168.122.111:9049\n' + ' stats enable\n' + ' stats uri /stats\n' + ' stats refresh 10s\n' + ' stats auth admin:12345\n' + ' http-request use-service prometheus-exporter if { path /metrics }\n' + ' monitor-uri /health\n\n' + 'frontend frontend\n' + ' bind 192.168.122.100:2049\n' + ' default_backend backend\n\n' + 'backend backend\n' + ' mode tcp\n' + ' balance source\n' + ' hash-type consistent\n' + ' default-server send-proxy-v2\n' + ' server nfs.foo.0 192.168.122.111:12049\n' + ) + haproxy_expected_conf = { + 'files': {'haproxy.cfg': haproxy_txt} + } + + nfs_ganesha_txt = ( + "# This file is generated by cephadm.\n" + 'NFS_CORE_PARAM {\n' + ' Enable_NLM = false;\n' + ' Enable_RQUOTA = false;\n' + ' Protocols = 4;\n' + ' NFS_Port = 2049;\n' + ' HAProxy_Hosts = 192.168.122.111, 10.10.2.20, 192.168.122.222;\n' + '}\n' + '\n' + 'NFSv4 {\n' + ' Delegations = false;\n' + " RecoveryBackend = 'rados_cluster';\n" + ' Minor_Versions = 1, 2;\n' + '}\n' + '\n' + 'RADOS_KV {\n' + ' UserId = "nfs.foo.test.0.0";\n' + ' nodeid = "nfs.foo.None";\n' + ' pool = ".nfs";\n' + ' namespace = "foo";\n' + '}\n' + '\n' + 'RADOS_URLS {\n' + ' UserId = "nfs.foo.test.0.0";\n' + ' watch_url = ' + '"rados://.nfs/foo/conf-nfs.foo";\n' + '}\n' + '\n' + 'RGW {\n' + ' cluster = "ceph";\n' + ' name = "client.nfs.foo.test.0.0-rgw";\n' + '}\n' + '\n' + "%url rados://.nfs/foo/conf-nfs.foo" + ) + nfs_expected_conf = { + 'files': {'ganesha.conf': nfs_ganesha_txt}, + 'config': '', + 'extra_args': ['-N', 'NIV_EVENT'], + 'keyring': ( + '[client.nfs.foo.test.0.0]\n' + 'key = None\n' + ), + 'namespace': 'foo', + 'pool': '.nfs', + 'rgw': { + 'cluster': 'ceph', + 'keyring': ( + '[client.nfs.foo.test.0.0-rgw]\n' + 'key = None\n' + ), + 'user': 'nfs.foo.test.0.0-rgw', + }, + 'userid': 'nfs.foo.test.0.0', + } + + nfs_daemons = [ + DaemonDescription( + 
daemon_type='nfs', + daemon_id='foo.0.1.host1.qwerty', + hostname='host1', + rank=0, + rank_generation=1, + ports=[12049], + ), + DaemonDescription( + daemon_type='nfs', + daemon_id='foo.0.0.host2.abcdef', + hostname='host2', + rank=0, + rank_generation=0, + ports=[12049], + ), + ] + _get_daemons_by_service.return_value = nfs_daemons + + ingress_svc = cephadm_module.cephadm_services['ingress'] + nfs_svc = cephadm_module.cephadm_services['nfs'] + + # add host network info to one host to test the behavior of + # adding all known-good addresses of the host to the list. + cephadm_module.cache.update_host_networks('host1', { + # this one is additional + '10.10.2.0/24': { + 'eth1': ['10.10.2.20'] + }, + # this is redundant and will be skipped + '192.168.122.0/24': { + 'eth0': ['192.168.122.111'] + }, + # this is a link-local address and will be ignored + "fe80::/64": { + "veth0": [ + "fe80::8cf5:25ff:fe1c:d963" + ], + "eth0": [ + "fe80::c7b:cbff:fef6:7370" + ], + "eth1": [ + "fe80::7201:25a7:390b:d9a7" + ] + }, + }) + + haproxy_generated_conf, _ = ingress_svc.haproxy_generate_config( + CephadmDaemonDeploySpec( + host='host1', + daemon_id='ingress', + service_name=ispec.service_name(), + ), + ) + assert haproxy_generated_conf == haproxy_expected_conf + + nfs_generated_conf, _ = nfs_svc.generate_config( + CephadmDaemonDeploySpec( + host='test', + daemon_id='foo.test.0.0', + service_name=nfs_service.service_name(), + ), + ) + assert nfs_generated_conf == nfs_expected_conf + class TestCephFsMirror: @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -1809,19 +2567,29 @@ class TestJaeger: with with_service(cephadm_module, spec): _run_cephadm.assert_called_with( 'test', - 'jaeger-query.test', - 'deploy', - [ - '--name', 'jaeger-query.test', - '--meta-json', - ('{"service_name": "jaeger-query", "ports": [16686], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '16686' - - ], - stdin=json.dumps(config), - image='' + "jaeger-query.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-query.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [16686], + }, + "meta": { + 'service_name': 'jaeger-query', + 'ports': [16686], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": config, + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -1837,37 +2605,57 @@ class TestJaeger: "elasticsearch_nodes": f'http://{build_url(host=cephadm_module.inventory.get_addr("test"), port=9200).lstrip("/")}'} with with_service(cephadm_module, es_spec): _run_cephadm.assert_called_with( - 'test', - 'elasticsearch.test', - 'deploy', - [ - '--name', 'elasticsearch.test', - '--meta-json', - ('{"service_name": "elasticsearch", "ports": [9200], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '9200' - - ], - stdin=json.dumps(es_config), - image='' + "test", + "elasticsearch.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'elasticsearch.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [9200], + }, + "meta": { + 'service_name': 'elasticsearch', + 'ports': [9200], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': 
None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": es_config, + }), ) with with_service(cephadm_module, collector_spec): _run_cephadm.assert_called_with( - 'test', - 'jaeger-collector.test', - 'deploy', - [ - '--name', 'jaeger-collector.test', - '--meta-json', - ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '14250' - - ], - stdin=json.dumps(collector_config), - image='' + "test", + "jaeger-collector.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-collector.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [14250], + }, + "meta": { + 'service_name': 'jaeger-collector', + 'ports': [14250], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": collector_config, + }), ) @patch("cephadm.serve.CephadmServe._run_cephadm") @@ -1883,35 +2671,55 @@ class TestJaeger: with with_host(cephadm_module, 'test'): with with_service(cephadm_module, collector_spec): _run_cephadm.assert_called_with( - 'test', - 'jaeger-collector.test', - 'deploy', - [ - '--name', 'jaeger-collector.test', - '--meta-json', - ('{"service_name": "jaeger-collector", "ports": [14250], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '14250' - - ], - stdin=json.dumps(collector_config), - image='' + "test", + "jaeger-collector.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-collector.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [14250], + }, + "meta": { + 'service_name': 'jaeger-collector', + 'ports': [14250], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": collector_config, + }), ) with with_service(cephadm_module, agent_spec): _run_cephadm.assert_called_with( - 'test', - 'jaeger-agent.test', - 'deploy', - [ - '--name', 'jaeger-agent.test', - '--meta-json', - ('{"service_name": "jaeger-agent", "ports": [6799], "ip": null, "deployed_by": [], "rank": null, ' - '"rank_generation": null, "extra_container_args": null, "extra_entrypoint_args": null}'), - '--config-json', '-', - '--tcp-ports', '6799' - - ], - stdin=json.dumps(agent_config), - image='' + "test", + "jaeger-agent.test", + ['_orch', 'deploy'], + [], + stdin=json.dumps({ + "fsid": "fsid", + "name": 'jaeger-agent.test', + "image": '', + "deploy_arguments": [], + "params": { + 'tcp_ports': [6799], + }, + "meta": { + 'service_name': 'jaeger-agent', + 'ports': [6799], + 'ip': None, + 'deployed_by': [], + 'rank': None, + 'rank_generation': None, + 'extra_container_args': None, + 'extra_entrypoint_args': None, + }, + "config_blobs": agent_config, + }), ) diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_ssh.py b/ceph/src/pybind/mgr/cephadm/tests/test_ssh.py index 4197d8d7e..29f01b6c7 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_ssh.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_ssh.py @@ -1,3 +1,5 @@ +import asyncssh +from asyncssh.process import SSHCompletedProcess from unittest import mock try: # AsyncMock was not added until python 3.8 @@ 
-19,6 +21,7 @@ from ceph.deployment.hostspec import HostSpec from cephadm import CephadmOrchestrator from cephadm.serve import CephadmServe from cephadm.tests.fixtures import with_host, wait, async_side_effect +from orchestrator import OrchestratorError @pytest.mark.skipif(ConnectionLost is None, reason='no asyncssh') @@ -49,6 +52,52 @@ class TestWithSSH: out = wait(cephadm_module, cephadm_module.get_hosts())[0].to_json() assert out == HostSpec('test', '1::4').to_json() + def test_ssh_remote_cmds_execution(self, cephadm_module): + + if not AsyncMock: + # can't run this test if we could not import AsyncMock + return + + class FakeConn: + def __init__(self, exception=None, returncode=0): + self.exception = exception + self.returncode = returncode + + async def run(self, *args, **kwargs): + if self.exception: + raise self.exception + else: + return SSHCompletedProcess(returncode=self.returncode, stdout="", stderr="") + + async def close(self): + pass + + def run_test(host, conn, expected_error): + mock_connect = AsyncMock(return_value=conn) + with pytest.raises(OrchestratorError, match=expected_error): + with mock.patch("asyncssh.connect", new=mock_connect): + with with_host(cephadm_module, host): + CephadmServe(cephadm_module)._check_host(host) + + # Test case 1: command failure + run_test('test1', FakeConn(returncode=1), "Command .+ failed") + + # Test case 2: connection error + run_test('test2', FakeConn(exception=asyncssh.ChannelOpenError(1, "", "")), "Unable to reach remote host test2.") + + # Test case 3: asyncssh ProcessError + stderr = "my-process-stderr" + run_test('test3', FakeConn(exception=asyncssh.ProcessError(returncode=3, + env="", + command="", + subsystem="", + exit_status="", + exit_signal="", + stderr=stderr, + stdout="")), f"Cannot execute the command.+{stderr}") + # Test case 4: generic error + run_test('test4', FakeConn(exception=Exception), "Generic error while executing command.+") + @pytest.mark.skipif(ConnectionLost is not None, reason='asyncssh') class TestWithoutSSH: diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py b/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py index 38a3a3907..66feaee31 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_tuned_profiles.py @@ -37,6 +37,15 @@ class FakeCache: def get_draining_hosts(self): return [] + def is_host_unreachable(self, hostname: str): + return hostname in [h.hostname for h in self.get_unreachable_hosts()] + + def is_host_schedulable(self, hostname: str): + return hostname in [h.hostname for h in self.get_schedulable_hosts()] + + def is_host_draining(self, hostname: str): + return hostname in [h.hostname for h in self.get_draining_hosts()] + @property def networks(self): return {h: {'a': {'b': ['c']}} for h in self.hosts} @@ -165,6 +174,32 @@ class TestTunedProfiles: _write_remote_file.assert_called_with( 'a', f'{SYSCTL_DIR}/p2-cephadm-tuned-profile.conf', tp._profile_to_str(self.tspec2).encode('utf-8')) + def test_dont_write_to_unreachable_hosts(self): + profiles = {'p1': self.tspec1, 'p2': self.tspec2, 'p3': self.tspec3} + + # list host "a" and "b" as hosts that exist, "a" will be + # a normal, schedulable host and "b" is considered unreachable + mgr = FakeMgr(['a', 'b'], + ['a'], + ['b'], + profiles) + tp = TunedProfileUtils(mgr) + + assert 'a' not in tp.mgr.cache.last_tuned_profile_update + assert 'b' not in tp.mgr.cache.last_tuned_profile_update + + # with an online host, should proceed as normal. 
Providing + # no actual profiles here though so the only actual action taken + # is updating the entry in the last_tuned_profile_update dict + tp._write_tuned_profiles('a', {}) + assert 'a' in tp.mgr.cache.last_tuned_profile_update + + # trying to write to an unreachable host should be a no-op + # and return immediately. No entry for 'b' should be added + # to the last_tuned_profile_update dict + tp._write_tuned_profiles('b', {}) + assert 'b' not in tp.mgr.cache.last_tuned_profile_update + def test_store(self): mgr = FakeMgr(['a', 'b', 'c'], ['a', 'b', 'c'], diff --git a/ceph/src/pybind/mgr/cephadm/tests/test_upgrade.py b/ceph/src/pybind/mgr/cephadm/tests/test_upgrade.py index 7aa46f902..3b5c305b5 100644 --- a/ceph/src/pybind/mgr/cephadm/tests/test_upgrade.py +++ b/ceph/src/pybind/mgr/cephadm/tests/test_upgrade.py @@ -7,6 +7,7 @@ from ceph.deployment.service_spec import PlacementSpec, ServiceSpec from cephadm import CephadmOrchestrator from cephadm.upgrade import CephadmUpgrade, UpgradeState from cephadm.ssh import HostConnectionError +from cephadm.utils import ContainerInspectInfo from orchestrator import OrchestratorError, DaemonDescription from .fixtures import _run_cephadm, wait, with_host, with_service, \ receive_agent_metadata, async_side_effect @@ -80,6 +81,30 @@ def test_upgrade_resume_clear_health_warnings(_rm_health_warning, cephadm_module _rm_health_warning.assert_has_calls(calls_list, any_order=True) +@mock.patch('cephadm.upgrade.CephadmUpgrade._get_current_version', lambda _: (17, 2, 6)) +@mock.patch("cephadm.serve.CephadmServe._get_container_image_info") +def test_upgrade_check_with_ceph_version(_get_img_info, cephadm_module: CephadmOrchestrator): + # This test was added to avoid screwing up the image base so that + # when the version was added to it it made an incorrect image + # The issue caused the image to come out as + # quay.io/ceph/ceph:v18:v18.2.0 + # see https://tracker.ceph.com/issues/63150 + _img = '' + + def _fake_get_img_info(img_name): + nonlocal _img + _img = img_name + return ContainerInspectInfo( + 'image_id', + '18.2.0', + 'digest' + ) + + _get_img_info.side_effect = _fake_get_img_info + cephadm_module.upgrade_check('', '18.2.0') + assert _img == 'quay.io/ceph/ceph:v18.2.0' + + @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}')) @pytest.mark.parametrize("use_repo_digest", [ diff --git a/ceph/src/pybind/mgr/cephadm/tuned_profiles.py b/ceph/src/pybind/mgr/cephadm/tuned_profiles.py index 19d97f421..8ec30bd53 100644 --- a/ceph/src/pybind/mgr/cephadm/tuned_profiles.py +++ b/ceph/src/pybind/mgr/cephadm/tuned_profiles.py @@ -67,7 +67,7 @@ class TunedProfileUtils(): SHOULD be on the host. Then if we see any file names that don't match this, but DO include "-cephadm-tuned-profile.conf" (implying they're from us), remove them. 
""" - if host in self.mgr.offline_hosts: + if self.mgr.cache.is_host_unreachable(host): return cmd = ['ls', SYSCTL_DIR] found_files = self.mgr.ssh.check_execute_command(host, cmd, log_command=self.mgr.log_refresh_metadata).split('\n') @@ -88,7 +88,7 @@ class TunedProfileUtils(): self.mgr.ssh.check_execute_command(host, ['sysctl', '--system']) def _write_tuned_profiles(self, host: str, profiles: List[Dict[str, str]]) -> None: - if host in self.mgr.offline_hosts: + if self.mgr.cache.is_host_unreachable(host): return updated = False for p in profiles: diff --git a/ceph/src/pybind/mgr/cephadm/upgrade.py b/ceph/src/pybind/mgr/cephadm/upgrade.py index 552964c84..eeae37580 100644 --- a/ceph/src/pybind/mgr/cephadm/upgrade.py +++ b/ceph/src/pybind/mgr/cephadm/upgrade.py @@ -9,7 +9,7 @@ from cephadm.registry import Registry from cephadm.serve import CephadmServe from cephadm.services.cephadmservice import CephadmDaemonDeploySpec from cephadm.utils import ceph_release_to_major, name_to_config_section, CEPH_UPGRADE_ORDER, \ - MONITORING_STACK_TYPES, CEPH_TYPES, GATEWAY_TYPES + CEPH_TYPES, NON_CEPH_IMAGE_TYPES, GATEWAY_TYPES from cephadm.ssh import HostConnectionError from orchestrator import OrchestratorError, DaemonDescription, DaemonDescriptionStatus, daemon_type_to_service @@ -267,6 +267,9 @@ class CephadmUpgrade: if not image: image = self.mgr.container_image_base reg_name, bare_image = image.split('/', 1) + if ':' in bare_image: + # for our purposes, we don't want to use the tag here + bare_image = bare_image.split(':')[0] reg = Registry(reg_name) (current_major, current_minor, _) = self._get_current_version() versions = [] @@ -395,7 +398,7 @@ class CephadmUpgrade: # in order for the user's selection of daemons to upgrade to be valid. for example, # if they say --daemon-types 'osd,mds' but mons have not been upgraded, we block. daemons = [d for d in self.mgr.cache.get_daemons( - ) if d.daemon_type not in MONITORING_STACK_TYPES] + ) if d.daemon_type not in NON_CEPH_IMAGE_TYPES] err_msg_base = 'Cannot start upgrade. ' # "dtypes" will later be filled in with the types of daemons that will be upgraded with the given parameters dtypes = [] @@ -764,7 +767,7 @@ class CephadmUpgrade: if ( (self.mgr.use_repo_digest and d.matches_digests(target_digests)) or (not self.mgr.use_repo_digest and d.matches_image_name(target_name)) - or (d.daemon_type in MONITORING_STACK_TYPES) + or (d.daemon_type in NON_CEPH_IMAGE_TYPES) ): logger.debug('daemon %s.%s on correct image' % ( d.daemon_type, d.daemon_id)) @@ -1171,7 +1174,7 @@ class CephadmUpgrade: # and monitoring stack daemons. Additionally, this case is only valid if # the active mgr is already upgraded. 
if any(d in target_digests for d in self.mgr.get_active_mgr_digests()): - if daemon_type not in MONITORING_STACK_TYPES and daemon_type != 'mgr': + if daemon_type not in NON_CEPH_IMAGE_TYPES and daemon_type != 'mgr': continue else: self._mark_upgrade_complete() @@ -1184,8 +1187,8 @@ class CephadmUpgrade: upgraded_daemon_count += done self._update_upgrade_progress(upgraded_daemon_count / len(daemons)) - # make sure mgr and monitoring stack daemons are properly redeployed in staggered upgrade scenarios - if daemon_type == 'mgr' or daemon_type in MONITORING_STACK_TYPES: + # make sure mgr and non-ceph-image daemons are properly redeployed in staggered upgrade scenarios + if daemon_type == 'mgr' or daemon_type in NON_CEPH_IMAGE_TYPES: if any(d in target_digests for d in self.mgr.get_active_mgr_digests()): need_upgrade_names = [d[0].name() for d in need_upgrade] + \ [d[0].name() for d in need_upgrade_deployer] diff --git a/ceph/src/pybind/mgr/cephadm/utils.py b/ceph/src/pybind/mgr/cephadm/utils.py index 6f8e022d6..63672936c 100644 --- a/ceph/src/pybind/mgr/cephadm/utils.py +++ b/ceph/src/pybind/mgr/cephadm/utils.py @@ -23,7 +23,7 @@ class CephadmNoImage(Enum): # NOTE: order important here as these are used for upgrade order CEPH_TYPES = ['mgr', 'mon', 'crash', 'osd', 'mds', 'rgw', 'rbd-mirror', 'cephfs-mirror', 'ceph-exporter'] -GATEWAY_TYPES = ['iscsi', 'nfs'] +GATEWAY_TYPES = ['iscsi', 'nfs', 'nvmeof'] MONITORING_STACK_TYPES = ['node-exporter', 'prometheus', 'alertmanager', 'grafana', 'loki', 'promtail'] RESCHEDULE_FROM_OFFLINE_HOSTS_TYPES = ['haproxy', 'nfs'] @@ -33,6 +33,11 @@ CEPH_UPGRADE_ORDER = CEPH_TYPES + GATEWAY_TYPES + MONITORING_STACK_TYPES # these daemon types use the ceph container image CEPH_IMAGE_TYPES = CEPH_TYPES + ['iscsi', 'nfs'] +# these daemons do not use the ceph image. There are other daemons +# that also don't use the ceph image, but we only care about those +# that are part of the upgrade order here +NON_CEPH_IMAGE_TYPES = MONITORING_STACK_TYPES + ['nvmeof'] + # Used for _run_cephadm used for check-host etc that don't require an --image parameter cephadmNoImage = CephadmNoImage.token @@ -43,12 +48,22 @@ class ContainerInspectInfo(NamedTuple): repo_digests: Optional[List[str]] +class SpecialHostLabels(str, Enum): + ADMIN: str = '_admin' + NO_MEMORY_AUTOTUNE: str = '_no_autotune_memory' + DRAIN_DAEMONS: str = '_no_schedule' + DRAIN_CONF_KEYRING: str = '_no_conf_keyring' + + def to_json(self) -> str: + return self.value + + def name_to_config_section(name: str) -> ConfEntity: """ Map from daemon names to ceph entity names (as seen in config) """ daemon_type = name.split('.', 1)[0] - if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash', 'iscsi', 'ceph-exporter']: + if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash', 'iscsi', 'ceph-exporter', 'nvmeof']: return ConfEntity('client.' 
+ name) elif daemon_type in ['mon', 'osd', 'mds', 'mgr', 'client']: return ConfEntity(name) diff --git a/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh index 83b2eb694..a48f759f5 100755 --- a/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh +++ b/ceph/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh @@ -38,6 +38,8 @@ cypress_run () { cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend +kcli ssh -u root ceph-node-00 'cephadm shell "ceph config set mgr mgr/prometheus/exclude_perf_counters false"' + # check if the prometheus daemon is running # before starting the e2e tests diff --git a/ceph/src/pybind/mgr/dashboard/controllers/cephfs.py b/ceph/src/pybind/mgr/dashboard/controllers/cephfs.py index 7b7589d42..09b2bebfc 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/cephfs.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/cephfs.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- +import json import logging import os from collections import defaultdict +from typing import Any, Dict import cephfs import cherrypy @@ -12,8 +14,9 @@ from ..security import Scope from ..services.ceph_service import CephService from ..services.cephfs import CephFS as CephFS_ from ..services.exception import handle_cephfs_error -from ..tools import ViewCache -from . import APIDoc, APIRouter, EndpointDoc, RESTController, UIRouter, allow_empty_body +from ..tools import ViewCache, str_to_bool +from . import APIDoc, APIRouter, DeletePermission, Endpoint, EndpointDoc, \ + RESTController, UIRouter, UpdatePermission, allow_empty_body GET_QUOTAS_SCHEMA = { 'max_bytes': (int, ''), @@ -23,6 +26,7 @@ GET_QUOTAS_SCHEMA = { logger = logging.getLogger("controllers.rgw") +# pylint: disable=R0904 @APIRouter('/cephfs', Scope.CEPHFS) @APIDoc("Cephfs Management API", "Cephfs") class CephFS(RESTController): @@ -37,6 +41,59 @@ class CephFS(RESTController): fsmap = mgr.get("fs_map") return fsmap['filesystems'] + def create(self, name: str, service_spec: Dict[str, Any]): + service_spec_str = '1 ' + if 'labels' in service_spec['placement']: + for label in service_spec['placement']['labels']: + service_spec_str += f'label:{label},' + service_spec_str = service_spec_str[:-1] + if 'hosts' in service_spec['placement']: + for host in service_spec['placement']['hosts']: + service_spec_str += f'{host},' + service_spec_str = service_spec_str[:-1] + + error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_create', None, + {'name': name, 'placement': service_spec_str}) + if error_code != 0: + raise RuntimeError( + f'Error creating volume {name} with placement {str(service_spec)}: {err}') + return f'Volume {name} created successfully' + + @EndpointDoc("Remove CephFS Volume", + parameters={ + 'name': (str, 'File System Name'), + }) + @allow_empty_body + @Endpoint('DELETE') + @DeletePermission + def remove(self, name): + error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_rm', None, + {'vol_name': name, + 'yes-i-really-mean-it': "--yes-i-really-mean-it"}) + if error_code != 0: + raise DashboardException( + msg=f'Error deleting volume {name}: {err}', + component='cephfs') + return f'Volume {name} removed successfully' + + @EndpointDoc("Rename CephFS Volume", + parameters={ + 'name': (str, 'Existing FS Name'), + 'new_name': (str, 'New FS Name'), + }) + @allow_empty_body + @UpdatePermission + @Endpoint('PUT') + def rename(self, name: str, new_name: str): + error_code, _, err = mgr.remote('volumes', '_cmd_fs_volume_rename', None, + 
{'vol_name': name, 'new_vol_name': new_name, + 'yes_i_really_mean_it': True}) + if error_code != 0: + raise DashboardException( + msg=f'Error renaming volume {name} to {new_name}: {err}', + component='cephfs') + return f'Volume {name} renamed successfully to {new_name}' + def get(self, fs_id): fs_id = self.fs_id_to_int(fs_id) return self.fs_status(fs_id) @@ -560,3 +617,149 @@ class CephFsUi(CephFS): except (cephfs.PermissionError, cephfs.ObjectNotFound): # pragma: no cover paths = [] return paths + + +@APIRouter('/cephfs/subvolume', Scope.CEPHFS) +@APIDoc('CephFS Subvolume Management API', 'CephFSSubvolume') +class CephFSSubvolume(RESTController): + + def get(self, vol_name: str, group_name: str = ""): + params = {'vol_name': vol_name} + if group_name: + params['group_name'] = group_name + error_code, out, err = mgr.remote( + 'volumes', '_cmd_fs_subvolume_ls', None, params) + if error_code != 0: + raise DashboardException( + f'Failed to list subvolumes for volume {vol_name}: {err}' + ) + subvolumes = json.loads(out) + for subvolume in subvolumes: + params['sub_name'] = subvolume['name'] + error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None, + params) + if error_code != 0: + raise DashboardException( + f'Failed to get info for subvolume {subvolume["name"]}: {err}' + ) + subvolume['info'] = json.loads(out) + return subvolumes + + @RESTController.Resource('GET') + def info(self, vol_name: str, subvol_name: str, group_name: str = ""): + params = {'vol_name': vol_name, 'sub_name': subvol_name} + if group_name: + params['group_name'] = group_name + error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolume_info', None, + params) + if error_code != 0: + raise DashboardException( + f'Failed to get info for subvolume {subvol_name}: {err}' + ) + return json.loads(out) + + def create(self, vol_name: str, subvol_name: str, **kwargs): + error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_create', None, { + 'vol_name': vol_name, 'sub_name': subvol_name, **kwargs}) + if error_code != 0: + raise DashboardException( + f'Failed to create subvolume {subvol_name}: {err}' + ) + + return f'Subvolume {subvol_name} created successfully' + + def set(self, vol_name: str, subvol_name: str, size: str, group_name: str = ""): + params = {'vol_name': vol_name, 'sub_name': subvol_name} + if size: + params['new_size'] = size + if group_name: + params['group_name'] = group_name + error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolume_resize', None, + params) + if error_code != 0: + raise DashboardException( + f'Failed to update subvolume {subvol_name}: {err}' + ) + + return f'Subvolume {subvol_name} updated successfully' + + def delete(self, vol_name: str, subvol_name: str, group_name: str = "", + retain_snapshots: bool = False): + params = {'vol_name': vol_name, 'sub_name': subvol_name} + if group_name: + params['group_name'] = group_name + retain_snapshots = str_to_bool(retain_snapshots) + if retain_snapshots: + params['retain_snapshots'] = 'True' + error_code, _, err = mgr.remote( + 'volumes', '_cmd_fs_subvolume_rm', None, params) + if error_code != 0: + raise DashboardException( + msg=f'Failed to remove subvolume {subvol_name}: {err}', + component='cephfs') + return f'Subvolume {subvol_name} removed successfully' + + +@APIRouter('/cephfs/subvolume/group', Scope.CEPHFS) +@APIDoc("Cephfs Subvolume Group Management API", "CephfsSubvolumeGroup") +class CephFSSubvolumeGroups(RESTController): + + def get(self, vol_name): + if not vol_name: + raise DashboardException( + f'Error 
listing subvolume groups for {vol_name}') + error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_ls', + None, {'vol_name': vol_name}) + if error_code != 0: + raise DashboardException( + f'Error listing subvolume groups for {vol_name}') + subvolume_groups = json.loads(out) + for group in subvolume_groups: + error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info', + None, {'vol_name': vol_name, + 'group_name': group['name']}) + if error_code != 0: + raise DashboardException( + f'Failed to get info for subvolume group {group["name"]}: {err}' + ) + group['info'] = json.loads(out) + return subvolume_groups + + @RESTController.Resource('GET') + def info(self, vol_name: str, group_name: str): + error_code, out, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_info', None, { + 'vol_name': vol_name, 'group_name': group_name}) + if error_code != 0: + raise DashboardException( + f'Failed to get info for subvolume group {group_name}: {err}' + ) + return json.loads(out) + + def create(self, vol_name: str, group_name: str, **kwargs): + error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_create', None, { + 'vol_name': vol_name, 'group_name': group_name, **kwargs}) + if error_code != 0: + raise DashboardException( + f'Failed to create subvolume group {group_name}: {err}' + ) + + def set(self, vol_name: str, group_name: str, size: str): + if not size: + return f'Failed to update subvolume group {group_name}, size was not provided' + error_code, _, err = mgr.remote('volumes', '_cmd_fs_subvolumegroup_resize', None, { + 'vol_name': vol_name, 'group_name': group_name, 'new_size': size}) + if error_code != 0: + raise DashboardException( + f'Failed to update subvolume group {group_name}: {err}' + ) + return f'Subvolume group {group_name} updated successfully' + + def delete(self, vol_name: str, group_name: str): + error_code, _, err = mgr.remote( + 'volumes', '_cmd_fs_subvolumegroup_rm', None, { + 'vol_name': vol_name, 'group_name': group_name}) + if error_code != 0: + raise DashboardException( + f'Failed to delete subvolume group {group_name}: {err}' + ) + return f'Subvolume group {group_name} removed successfully' diff --git a/ceph/src/pybind/mgr/dashboard/controllers/cluster.py b/ceph/src/pybind/mgr/dashboard/controllers/cluster.py index d8170e672..5091457ec 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/cluster.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/cluster.py @@ -1,9 +1,16 @@ # -*- coding: utf-8 -*- +from typing import Dict, List, Optional + from ..security import Scope from ..services.cluster import ClusterModel -from . import APIDoc, APIRouter, EndpointDoc, RESTController +from ..services.exception import handle_orchestrator_error +from ..services.orchestrator import OrchClient, OrchFeature +from ..tools import str_to_bool +from . 
import APIDoc, APIRouter, CreatePermission, Endpoint, EndpointDoc, \ + ReadPermission, RESTController, UpdatePermission, allow_empty_body from ._version import APIVersion +from .orchestrator import raise_if_no_orchestrator @APIRouter('/cluster', Scope.CONFIG_OPT) @@ -18,4 +25,77 @@ class Cluster(RESTController): @EndpointDoc("Update the cluster status", parameters={'status': (str, 'Cluster Status')}) def singleton_set(self, status: str): - ClusterModel(status).to_db() + ClusterModel(status).to_db() # -*- coding: utf-8 -*- + + +@APIRouter('/cluster/upgrade', Scope.CONFIG_OPT) +@APIDoc("Upgrade Management API", "Upgrade") +class ClusterUpgrade(RESTController): + @RESTController.MethodMap() + @raise_if_no_orchestrator([OrchFeature.UPGRADE_LIST]) + @handle_orchestrator_error('upgrade') + @EndpointDoc("Get the available versions to upgrade", + parameters={ + 'image': (str, 'Ceph Image'), + 'tags': (bool, 'Show all image tags'), + 'show_all_versions': (bool, 'Show all available versions') + }) + @ReadPermission + def list(self, tags: bool = False, image: Optional[str] = None, + show_all_versions: Optional[bool] = False) -> Dict: + orch = OrchClient.instance() + available_upgrades = orch.upgrades.list(image, str_to_bool(tags), + str_to_bool(show_all_versions)) + return available_upgrades + + @Endpoint() + @raise_if_no_orchestrator([OrchFeature.UPGRADE_STATUS]) + @handle_orchestrator_error('upgrade') + @EndpointDoc("Get the cluster upgrade status") + @ReadPermission + def status(self) -> Dict: + orch = OrchClient.instance() + status = orch.upgrades.status().to_json() + return status + + @Endpoint('POST') + @raise_if_no_orchestrator([OrchFeature.UPGRADE_START]) + @handle_orchestrator_error('upgrade') + @EndpointDoc("Start the cluster upgrade") + @CreatePermission + def start(self, image: Optional[str] = None, version: Optional[str] = None, + daemon_types: Optional[List[str]] = None, host_placement: Optional[str] = None, + services: Optional[List[str]] = None, limit: Optional[int] = None) -> str: + orch = OrchClient.instance() + start = orch.upgrades.start(image, version, daemon_types, host_placement, services, limit) + return start + + @Endpoint('PUT') + @raise_if_no_orchestrator([OrchFeature.UPGRADE_PAUSE]) + @handle_orchestrator_error('upgrade') + @EndpointDoc("Pause the cluster upgrade") + @UpdatePermission + @allow_empty_body + def pause(self) -> str: + orch = OrchClient.instance() + return orch.upgrades.pause() + + @Endpoint('PUT') + @raise_if_no_orchestrator([OrchFeature.UPGRADE_RESUME]) + @handle_orchestrator_error('upgrade') + @EndpointDoc("Resume the cluster upgrade") + @UpdatePermission + @allow_empty_body + def resume(self) -> str: + orch = OrchClient.instance() + return orch.upgrades.resume() + + @Endpoint('PUT') + @raise_if_no_orchestrator([OrchFeature.UPGRADE_STOP]) + @handle_orchestrator_error('upgrade') + @EndpointDoc("Stop the cluster upgrade") + @UpdatePermission + @allow_empty_body + def stop(self) -> str: + orch = OrchClient.instance() + return orch.upgrades.stop() diff --git a/ceph/src/pybind/mgr/dashboard/controllers/daemon.py b/ceph/src/pybind/mgr/dashboard/controllers/daemon.py index eeea5a326..d5c288131 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/daemon.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/daemon.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from typing import Optional +from typing import List, Optional from ..exceptions import DashboardException from ..security import Scope @@ -31,3 +31,19 @@ class Daemon(RESTController): orch = OrchClient.instance() 
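        # OrchClient is the dashboard's wrapper around the active orchestrator
        # backend (e.g. cephadm); the call below forwards the requested action
        # for the named daemon to that backend.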
res = orch.daemons.action(action=action, daemon_name=daemon_name, image=container_image) return res + + @raise_if_no_orchestrator([OrchFeature.DAEMON_LIST]) + @handle_orchestrator_error('daemon') + @RESTController.MethodMap(version=APIVersion.DEFAULT) + def list(self, daemon_types: Optional[List[str]] = None): + """List all daemons in the cluster. Also filter by the daemon types specified + + :param daemon_types: List of daemon types to filter by. + :return: Returns list of daemons. + :rtype: list + """ + orch = OrchClient.instance() + daemons = [d.to_dict() for d in orch.services.list_daemons()] + if daemon_types: + daemons = [d for d in daemons if d['daemon_type'] in daemon_types] + return daemons diff --git a/ceph/src/pybind/mgr/dashboard/controllers/host.py b/ceph/src/pybind/mgr/dashboard/controllers/host.py index 9faaa5192..812b9c035 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/host.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/host.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import copy import os import time from collections import Counter @@ -8,11 +7,12 @@ from typing import Dict, List, Optional import cherrypy from mgr_util import merge_dicts -from orchestrator import HostSpec from .. import mgr from ..exceptions import DashboardException +from ..plugins.ttl_cache import ttl_cache, ttl_cache_invalidator from ..security import Scope +from ..services._paginate import ListPaginator from ..services.ceph_service import CephService from ..services.exception import handle_orchestrator_error from ..services.orchestrator import OrchClient, OrchFeature @@ -117,51 +117,6 @@ def host_task(name, metadata, wait_for=10.0): return Task("host/{}".format(name), metadata, wait_for) -def merge_hosts_by_hostname(ceph_hosts, orch_hosts): - # type: (List[dict], List[HostSpec]) -> List[dict] - """ - Merge Ceph hosts with orchestrator hosts by hostnames. - - :param ceph_hosts: hosts returned from mgr - :type ceph_hosts: list of dict - :param orch_hosts: hosts returned from ochestrator - :type orch_hosts: list of HostSpec - :return list of dict - """ - hosts = copy.deepcopy(ceph_hosts) - orch_hosts_map = {host.hostname: host.to_json() for host in orch_hosts} - - # Sort labels. - for hostname in orch_hosts_map: - orch_hosts_map[hostname]['labels'].sort() - - # Hosts in both Ceph and Orchestrator. - for host in hosts: - hostname = host['hostname'] - if hostname in orch_hosts_map: - host.update(orch_hosts_map[hostname]) - host['sources']['orchestrator'] = True - orch_hosts_map.pop(hostname) - - # Hosts only in Orchestrator. - orch_hosts_only = [ - merge_dicts( - { - 'ceph_version': '', - 'services': [], - 'sources': { - 'ceph': False, - 'orchestrator': True - } - }, orch_hosts_map[hostname]) for hostname in orch_hosts_map - ] - hosts.extend(orch_hosts_only) - for host in hosts: - host['service_instances'] = populate_service_instances( - host['hostname'], host['services']) - return hosts - - def populate_service_instances(hostname, services): orch = OrchClient.instance() if orch.available(): @@ -173,6 +128,7 @@ def populate_service_instances(hostname, services): return [{'type': k, 'count': v} for k, v in Counter(services).items()] +@ttl_cache(60, label='get_hosts') def get_hosts(sources=None): """ Get hosts from various sources. 
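The @ttl_cache(60, label='get_hosts') decorator added above memoizes the host list for 60 seconds, and the @ttl_cache_invalidator('get_hosts') decorator applied further down to the host label/maintenance endpoint drops that entry so the next request refetches. A minimal sketch of the idea, assuming a hand-rolled time-based cache rather than the dashboard's actual plugins.ttl_cache implementation, and ignoring call arguments for brevity:

import time
from functools import wraps
from typing import Any, Callable, Dict, Tuple

_cache: Dict[str, Tuple[float, Any]] = {}


def simple_ttl_cache(ttl: float, label: str) -> Callable:
    # cache the wrapped function's result under `label` for `ttl` seconds
    def decorator(fn: Callable) -> Callable:
        @wraps(fn)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            now = time.monotonic()
            hit = _cache.get(label)
            if hit is not None and now - hit[0] < ttl:
                return hit[1]  # entry still fresh, serve it from the cache
            value = fn(*args, **kwargs)
            _cache[label] = (now, value)
            return value
        return wrapper
    return decorator


def invalidate_cached(label: str) -> None:
    # drop the cached entry (the real ttl_cache_invalidator wraps this idea
    # in a decorator so mutating endpoints invalidate automatically)
    _cache.pop(label, None)

With these helpers a function could be wrapped as @simple_ttl_cache(60, label='get_hosts') and invalidate_cached('get_hosts') called after a host is modified; the real decorators presumably also handle per-argument cache keys and thread safety.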
@@ -184,6 +140,22 @@ def get_hosts(sources=None): from_ceph = 'ceph' in _sources from_orchestrator = 'orchestrator' in _sources + if from_orchestrator: + orch = OrchClient.instance() + if orch.available(): + hosts = [ + merge_dicts( + { + 'ceph_version': '', + 'services': [], + 'sources': { + 'ceph': False, + 'orchestrator': True + } + }, host.to_json()) for host in orch.hosts.list() + ] + return hosts + ceph_hosts = [] if from_ceph: ceph_hosts = [ @@ -198,12 +170,6 @@ def get_hosts(sources=None): 'status': '' }) for server in mgr.list_servers() ] - if from_orchestrator: - orch = OrchClient.instance() - if orch.available(): - return merge_hosts_by_hostname(ceph_hosts, orch.hosts.list()) - for host in ceph_hosts: - host['service_instances'] = populate_service_instances(host['hostname'], host['services']) return ceph_hosts @@ -303,14 +269,30 @@ class Host(RESTController): 'facts': (bool, 'Host Facts') }, responses={200: LIST_HOST_SCHEMA}) - @RESTController.MethodMap(version=APIVersion(1, 2)) - def list(self, sources=None, facts=False): + @RESTController.MethodMap(version=APIVersion(1, 3)) + def list(self, sources=None, facts=False, offset: int = 0, + limit: int = 5, search: str = '', sort: str = ''): hosts = get_hosts(sources) + params = ['hostname'] + paginator = ListPaginator(int(offset), int(limit), sort, search, hosts, + searchable_params=params, sortable_params=params, + default_sort='+hostname') + # pylint: disable=unnecessary-comprehension + hosts = [host for host in paginator.list()] orch = OrchClient.instance() + cherrypy.response.headers['X-Total-Count'] = paginator.get_count() + for host in hosts: + if 'services' not in host: + host['services'] = [] + host['service_instances'] = populate_service_instances( + host['hostname'], host['services']) if str_to_bool(facts): if orch.available(): if not orch.get_missing_features(['get_facts']): - hosts_facts = orch.hosts.get_facts() + hosts_facts = [] + for host in hosts: + facts = orch.hosts.get_facts(host['hostname'])[0] + hosts_facts.append(facts) return merge_list_of_dicts_by_key(hosts, hosts_facts, 'hostname') raise DashboardException( @@ -430,13 +412,18 @@ class Host(RESTController): return [d.to_dict() for d in daemons] @handle_orchestrator_error('host') + @RESTController.MethodMap(version=APIVersion(1, 2)) def get(self, hostname: str) -> Dict: """ Get the specified host. :raises: cherrypy.HTTPError: If host not found. 
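        :return: The host dict, with a 'service_instances' entry derived from
                 its 'services' list via populate_service_instances().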
""" - return get_host(hostname) + host = get_host(hostname) + host['service_instances'] = populate_service_instances( + host['hostname'], host['services']) + return host + @ttl_cache_invalidator('get_hosts') @raise_if_no_orchestrator([OrchFeature.HOST_LABEL_ADD, OrchFeature.HOST_LABEL_REMOVE, OrchFeature.HOST_MAINTENANCE_ENTER, diff --git a/ceph/src/pybind/mgr/dashboard/controllers/perf_counters.py b/ceph/src/pybind/mgr/dashboard/controllers/perf_counters.py index 0bd883366..ab0bdcb0b 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/perf_counters.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/perf_counters.py @@ -79,4 +79,4 @@ class PerfCounters(RESTController): @EndpointDoc("Display Perf Counters", responses={200: PERF_SCHEMA}) def list(self): - return mgr.get_all_perf_counters() + return mgr.get_unlabeled_perf_counters() diff --git a/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py b/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py index 5aab37596..b639d8826 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/prometheus.py @@ -61,20 +61,24 @@ class PrometheusRESTController(RESTController): user = None password = None cert_file = None - secure_monitoring_stack = bool(mgr.get_module_option_ex('cephadm', - 'secure_monitoring_stack', - 'false')) - if secure_monitoring_stack: - cmd = {'prefix': f'orch {module_name} access info'} - ret, out, _ = mgr.mon_command(cmd) - if ret == 0 and out is not None: - access_info = json.loads(out) - user = access_info['user'] - password = access_info['password'] - certificate = access_info['certificate'] - cert_file = tempfile.NamedTemporaryFile(delete=False) - cert_file.write(certificate.encode('utf-8')) - cert_file.flush() + + orch_backend = mgr.get_module_option_ex('orchestrator', 'orchestrator') + if orch_backend == 'cephadm': + secure_monitoring_stack = mgr.get_module_option_ex('cephadm', + 'secure_monitoring_stack', + False) + if secure_monitoring_stack: + cmd = {'prefix': f'orch {module_name} get-credentials'} + ret, out, _ = mgr.mon_command(cmd) + if ret == 0 and out is not None: + access_info = json.loads(out) + user = access_info['user'] + password = access_info['password'] + certificate = access_info['certificate'] + cert_file = tempfile.NamedTemporaryFile(delete=False) + cert_file.write(certificate.encode('utf-8')) + cert_file.flush() + return user, password, cert_file def _get_api_url(self, host): diff --git a/ceph/src/pybind/mgr/dashboard/controllers/rbd.py b/ceph/src/pybind/mgr/dashboard/controllers/rbd.py index 027361fee..d0aef6f00 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/rbd.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/rbd.py @@ -49,36 +49,10 @@ def RbdTask(name, metadata, wait_for): # noqa: N802 return composed_decorator -def _sort_features(features, enable=True): - """ - Sorts image features according to feature dependencies: - - object-map depends on exclusive-lock - journaling depends on exclusive-lock - fast-diff depends on object-map - """ - ORDER = ['exclusive-lock', 'journaling', 'object-map', 'fast-diff'] # noqa: N806 - - def key_func(feat): - try: - return ORDER.index(feat) - except ValueError: - return id(feat) - - features.sort(key=key_func, reverse=not enable) - - @APIRouter('/block/image', Scope.RBD_IMAGE) @APIDoc("RBD Management API", "Rbd") class Rbd(RESTController): - # set of image features that can be enable on existing images - ALLOW_ENABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "journaling"} - - # 
set of image features that can be disabled on existing images - ALLOW_DISABLE_FEATURES = {"exclusive-lock", "object-map", "fast-diff", "deep-flatten", - "journaling"} - DEFAULT_LIMIT = 5 def _rbd_list(self, pool_name=None, offset=0, limit=DEFAULT_LIMIT, search='', sort=''): @@ -132,29 +106,9 @@ class Rbd(RESTController): data_pool=None, configuration=None, metadata=None, mirror_mode=None): - size = int(size) - - def _create(ioctx): - rbd_inst = rbd.RBD() - - # Set order - l_order = None - if obj_size and obj_size > 0: - l_order = int(round(math.log(float(obj_size), 2))) - - # Set features - feature_bitmask = format_features(features) - - rbd_inst.create(ioctx, name, size, order=l_order, old_format=False, - features=feature_bitmask, stripe_unit=stripe_unit, - stripe_count=stripe_count, data_pool=data_pool) - RbdConfiguration(pool_ioctx=ioctx, namespace=namespace, - image_name=name).set_configuration(configuration) - if metadata: - with rbd.Image(ioctx, name) as image: - RbdImageMetadataService(image).set_metadata(metadata) - - rbd_call(pool_name, namespace, _create) + RbdService.create(name, pool_name, size, namespace, + obj_size, features, stripe_unit, stripe_count, + data_pool, configuration, metadata) if mirror_mode: RbdMirroringService.enable_image(name, pool_name, namespace, @@ -166,86 +120,17 @@ class Rbd(RESTController): @RbdTask('delete', ['{image_spec}'], 2.0) def delete(self, image_spec): - pool_name, namespace, image_name = parse_image_spec(image_spec) - - image = RbdService.get_image(image_spec) - snapshots = image['snapshots'] - for snap in snapshots: - RbdSnapshotService.remove_snapshot(image_spec, snap['name'], snap['is_protected']) - - rbd_inst = rbd.RBD() - return rbd_call(pool_name, namespace, rbd_inst.remove, image_name) + return RbdService.delete(image_spec) @RbdTask('edit', ['{image_spec}', '{name}'], 4.0) def set(self, image_spec, name=None, size=None, features=None, configuration=None, metadata=None, enable_mirror=None, primary=None, force=False, resync=False, mirror_mode=None, schedule_interval='', remove_scheduling=False): - - pool_name, namespace, image_name = parse_image_spec(image_spec) - - def _edit(ioctx, image): - rbd_inst = rbd.RBD() - # check rename image - if name and name != image_name: - rbd_inst.rename(ioctx, image_name, name) - - # check resize - if size and size != image.size(): - image.resize(size) - - mirror_image_info = image.mirror_image_get_info() - if enable_mirror and mirror_image_info['state'] == rbd.RBD_MIRROR_IMAGE_DISABLED: - RbdMirroringService.enable_image( - image_name, pool_name, namespace, - MIRROR_IMAGE_MODE[mirror_mode]) - elif (enable_mirror is False - and mirror_image_info['state'] == rbd.RBD_MIRROR_IMAGE_ENABLED): - RbdMirroringService.disable_image( - image_name, pool_name, namespace) - - # check enable/disable features - if features is not None: - curr_features = format_bitmask(image.features()) - # check disabled features - _sort_features(curr_features, enable=False) - for feature in curr_features: - if (feature not in features - and feature in self.ALLOW_DISABLE_FEATURES - and feature in format_bitmask(image.features())): - f_bitmask = format_features([feature]) - image.update_features(f_bitmask, False) - # check enabled features - _sort_features(features) - for feature in features: - if (feature not in curr_features - and feature in self.ALLOW_ENABLE_FEATURES - and feature not in format_bitmask(image.features())): - f_bitmask = format_features([feature]) - image.update_features(f_bitmask, True) - - 
RbdConfiguration(pool_ioctx=ioctx, image_name=image_name).set_configuration( - configuration) - if metadata: - RbdImageMetadataService(image).set_metadata(metadata) - - if primary and not mirror_image_info['primary']: - RbdMirroringService.promote_image( - image_name, pool_name, namespace, force) - elif primary is False and mirror_image_info['primary']: - RbdMirroringService.demote_image( - image_name, pool_name, namespace) - - if resync: - RbdMirroringService.resync_image(image_name, pool_name, namespace) - - if schedule_interval: - RbdMirroringService.snapshot_schedule_add(image_spec, schedule_interval) - - if remove_scheduling: - RbdMirroringService.snapshot_schedule_remove(image_spec) - - return rbd_image_call(pool_name, namespace, image_name, _edit) + return RbdService.set(image_spec, name, size, features, + configuration, metadata, enable_mirror, primary, + force, resync, mirror_mode, schedule_interval, + remove_scheduling) @RbdTask('copy', {'src_image_spec': '{image_spec}', @@ -258,44 +143,17 @@ class Rbd(RESTController): snapshot_name=None, obj_size=None, features=None, stripe_unit=None, stripe_count=None, data_pool=None, configuration=None, metadata=None): - pool_name, namespace, image_name = parse_image_spec(image_spec) - - def _src_copy(s_ioctx, s_img): - def _copy(d_ioctx): - # Set order - l_order = None - if obj_size and obj_size > 0: - l_order = int(round(math.log(float(obj_size), 2))) - - # Set features - feature_bitmask = format_features(features) - - if snapshot_name: - s_img.set_snap(snapshot_name) - - s_img.copy(d_ioctx, dest_image_name, feature_bitmask, l_order, - stripe_unit, stripe_count, data_pool) - RbdConfiguration(pool_ioctx=d_ioctx, image_name=dest_image_name).set_configuration( - configuration) - if metadata: - with rbd.Image(d_ioctx, dest_image_name) as image: - RbdImageMetadataService(image).set_metadata(metadata) - - return rbd_call(dest_pool_name, dest_namespace, _copy) - - return rbd_image_call(pool_name, namespace, image_name, _src_copy) + return RbdService.copy(image_spec, dest_pool_name, dest_namespace, dest_image_name, + snapshot_name, obj_size, features, + stripe_unit, stripe_count, data_pool, + configuration, metadata) @RbdTask('flatten', ['{image_spec}'], 2.0) @RESTController.Resource('POST') @UpdatePermission @allow_empty_body def flatten(self, image_spec): - - def _flatten(ioctx, image): - image.flatten() - - pool_name, namespace, image_name = parse_image_spec(image_spec) - return rbd_image_call(pool_name, namespace, image_name, _flatten) + return RbdService.flatten(image_spec) @RESTController.Collection('GET') def default_features(self): @@ -325,9 +183,7 @@ class Rbd(RESTController): Images, even ones actively in-use by clones, can be moved to the trash and deleted at a later time. 
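        The actual trash move is performed by
        RbdService.move_image_to_trash(image_spec, delay).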
""" - pool_name, namespace, image_name = parse_image_spec(image_spec) - rbd_inst = rbd.RBD() - return rbd_call(pool_name, namespace, rbd_inst.trash_move, image_name, delay) + return RbdService.move_image_to_trash(image_spec, delay) @UIRouter('/block/rbd') diff --git a/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py b/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py index 2ee53fc1a..1e1053077 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py @@ -238,6 +238,17 @@ class ReplayingData(NamedTuple): entries_behind_primary: Optional[int] = None +def _get_mirror_mode(ioctx, image_name): + with rbd.Image(ioctx, image_name) as img: + mirror_mode = img.mirror_image_get_mode() + mirror_mode_str = 'Disabled' + if mirror_mode == rbd.RBD_MIRROR_IMAGE_MODE_JOURNAL: + mirror_mode_str = 'journal' + elif mirror_mode == rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT: + mirror_mode_str = 'snapshot' + return mirror_mode_str + + @ViewCache() @no_type_check def _get_pool_datum(pool_name): @@ -300,7 +311,8 @@ def _get_pool_datum(pool_name): data['mirror_images'] = sorted([ dict({ 'name': image['name'], - 'description': image['description'] + 'description': image['description'], + 'mirror_mode': _get_mirror_mode(ioctx, image['name']) }, **mirror_state['down' if not image['up'] else image['state']]) for image in mirror_image_status ], key=lambda k: k['name']) @@ -363,7 +375,8 @@ def _get_content_data(): # pylint: disable=R0914 'pool_name': pool_name, 'name': mirror_image['name'], 'state_color': mirror_image['state_color'], - 'state': mirror_image['state'] + 'state': mirror_image['state'], + 'mirror_mode': mirror_image['mirror_mode'] } if mirror_image['health'] == 'ok': diff --git a/ceph/src/pybind/mgr/dashboard/controllers/rgw.py b/ceph/src/pybind/mgr/dashboard/controllers/rgw.py index b29e42420..9ccf4b36b 100644 --- a/ceph/src/pybind/mgr/dashboard/controllers/rgw.py +++ b/ceph/src/pybind/mgr/dashboard/controllers/rgw.py @@ -2,20 +2,22 @@ import json import logging +import re from typing import Any, Dict, List, NamedTuple, Optional, Union import cherrypy +from .. import mgr from ..exceptions import DashboardException from ..rest_client import RequestException from ..security import Permission, Scope from ..services.auth import AuthManager, JwtManager from ..services.ceph_service import CephService -from ..services.rgw_client import NoRgwDaemonsException, RgwClient +from ..services.rgw_client import NoRgwDaemonsException, RgwClient, RgwMultisite from ..tools import json_str_to_object, str_to_bool -from . import APIDoc, APIRouter, BaseController, CRUDCollectionMethod, \ - CRUDEndpoint, Endpoint, EndpointDoc, ReadPermission, RESTController, \ - UIRouter, allow_empty_body +from . 
import APIDoc, APIRouter, BaseController, CreatePermission, \ + CRUDCollectionMethod, CRUDEndpoint, Endpoint, EndpointDoc, ReadPermission, \ + RESTController, UIRouter, UpdatePermission, allow_empty_body from ._crud import CRUDMeta, Form, FormField, FormTaskInfo, Icon, MethodType, \ TableAction, Validator, VerticalContainer from ._version import APIVersion @@ -79,6 +81,43 @@ class Rgw(BaseController): return status +@UIRouter('/rgw/multisite') +class RgwMultisiteStatus(RESTController): + @Endpoint() + @ReadPermission + # pylint: disable=R0801 + def status(self): + status = {'available': True, 'message': None} + multisite_instance = RgwMultisite() + is_multisite_configured = multisite_instance.get_multisite_status() + if not is_multisite_configured: + status['available'] = False + status['message'] = 'Multi-site provides disaster recovery and may also \ + serve as a foundation for content delivery networks' # type: ignore + return status + + @RESTController.Collection(method='PUT', path='/migrate') + @allow_empty_body + # pylint: disable=W0102,W0613 + def migrate(self, daemon_name=None, realm_name=None, zonegroup_name=None, zone_name=None, + zonegroup_endpoints=None, zone_endpoints=None, access_key=None, + secret_key=None): + multisite_instance = RgwMultisite() + result = multisite_instance.migrate_to_multisite(realm_name, zonegroup_name, + zone_name, zonegroup_endpoints, + zone_endpoints, access_key, + secret_key) + return result + + @RESTController.Collection(method='GET', path='/sync_status') + @allow_empty_body + # pylint: disable=W0102,W0613 + def get_sync_status(self): + multisite_instance = RgwMultisite() + result = multisite_instance.get_multisite_sync_status() + return result + + @APIRouter('/rgw/daemon', Scope.RGW) @APIDoc("RGW Daemon Management API", "RgwDaemon") class RgwDaemon(RESTController): @@ -105,7 +144,7 @@ class RgwDaemon(RESTController): 'zonegroup_name': metadata['zonegroup_name'], 'zone_name': metadata['zone_name'], 'default': instance.daemon.name == metadata['id'], - 'port': int(metadata['frontend_config#0'].split('port=')[1]) + 'port': int(re.findall(r'port=(\d+)', metadata['frontend_config#0'])[0]) } daemons.append(daemon) @@ -138,6 +177,12 @@ class RgwDaemon(RESTController): daemon['rgw_status'] = status return daemon + @RESTController.Collection(method='PUT', path='/set_multisite_config') + @allow_empty_body + def set_multisite_config(self, realm_name=None, zonegroup_name=None, + zone_name=None, daemon_name=None): + CephService.set_multisite_config(realm_name, zonegroup_name, zone_name, daemon_name) + class RgwRESTController(RESTController): def proxy(self, daemon_name, method, path, params=None, json_response=True): @@ -395,6 +440,38 @@ class RgwBucket(RgwRESTController): return CephService.get_encryption_config(daemon_name) +@UIRouter('/rgw/bucket', Scope.RGW) +class RgwBucketUi(RgwBucket): + @Endpoint('GET') + @ReadPermission + # pylint: disable=W0613 + def buckets_and_users_count(self, daemon_name=None): + buckets_count = 0 + users_count = 0 + daemon_object = RgwDaemon() + daemons = json.loads(daemon_object.list()) + unique_realms = set() + for daemon in daemons: + realm_name = daemon.get('realm_name', None) + if realm_name: + if realm_name not in unique_realms: + unique_realms.add(realm_name) + buckets = json.loads(RgwBucket.list(self, daemon_name=daemon['id'])) + users = json.loads(RgwUser.list(self, daemon_name=daemon['id'])) + users_count += len(users) + buckets_count += len(buckets) + else: + buckets = json.loads(RgwBucket.list(self, 
daemon_name=daemon['id'])) + users = json.loads(RgwUser.list(self, daemon_name=daemon['id'])) + users_count = len(users) + buckets_count = len(buckets) + + return { + 'buckets_count': buckets_count, + 'users_count': users_count + } + + @APIRouter('/rgw/user', Scope.RGW) @APIDoc("RGW User Management API", "RgwUser") class RgwUser(RgwRESTController): @@ -683,3 +760,211 @@ class RgwUserRole(NamedTuple): CreateDate: str MaxSessionDuration: int AssumeRolePolicyDocument: str + + +@APIRouter('/rgw/realm', Scope.RGW) +class RgwRealm(RESTController): + @allow_empty_body + # pylint: disable=W0613 + def create(self, realm_name, default): + multisite_instance = RgwMultisite() + result = multisite_instance.create_realm(realm_name, default) + return result + + @allow_empty_body + # pylint: disable=W0613 + def list(self): + multisite_instance = RgwMultisite() + result = multisite_instance.list_realms() + return result + + @allow_empty_body + # pylint: disable=W0613 + def get(self, realm_name): + multisite_instance = RgwMultisite() + result = multisite_instance.get_realm(realm_name) + return result + + @Endpoint() + @ReadPermission + def get_all_realms_info(self): + multisite_instance = RgwMultisite() + result = multisite_instance.get_all_realms_info() + return result + + @allow_empty_body + # pylint: disable=W0613 + def set(self, realm_name: str, new_realm_name: str, default: str = ''): + multisite_instance = RgwMultisite() + result = multisite_instance.edit_realm(realm_name, new_realm_name, default) + return result + + @Endpoint() + @ReadPermission + def get_realm_tokens(self): + try: + result = CephService.get_realm_tokens() + return result + except NoRgwDaemonsException as e: + raise DashboardException(e, http_status_code=404, component='rgw') + + @Endpoint(method='POST') + @UpdatePermission + @allow_empty_body + # pylint: disable=W0613 + def import_realm_token(self, realm_token, zone_name, port, placement_spec): + try: + multisite_instance = RgwMultisite() + result = CephService.import_realm_token(realm_token, zone_name, port, placement_spec) + multisite_instance.update_period() + return result + except NoRgwDaemonsException as e: + raise DashboardException(e, http_status_code=404, component='rgw') + + def delete(self, realm_name): + multisite_instance = RgwMultisite() + result = multisite_instance.delete_realm(realm_name) + return result + + +@APIRouter('/rgw/zonegroup', Scope.RGW) +class RgwZonegroup(RESTController): + @allow_empty_body + # pylint: disable=W0613 + def create(self, realm_name, zonegroup_name, default=None, master=None, + zonegroup_endpoints=None): + multisite_instance = RgwMultisite() + result = multisite_instance.create_zonegroup(realm_name, zonegroup_name, default, + master, zonegroup_endpoints) + return result + + @allow_empty_body + # pylint: disable=W0613 + def list(self): + multisite_instance = RgwMultisite() + result = multisite_instance.list_zonegroups() + return result + + @allow_empty_body + # pylint: disable=W0613 + def get(self, zonegroup_name): + multisite_instance = RgwMultisite() + result = multisite_instance.get_zonegroup(zonegroup_name) + return result + + @Endpoint() + @ReadPermission + def get_all_zonegroups_info(self): + multisite_instance = RgwMultisite() + result = multisite_instance.get_all_zonegroups_info() + return result + + def delete(self, zonegroup_name, delete_pools, pools: Optional[List[str]] = None): + if pools is None: + pools = [] + try: + multisite_instance = RgwMultisite() + result = multisite_instance.delete_zonegroup(zonegroup_name, 
delete_pools, pools) + return result + except NoRgwDaemonsException as e: + raise DashboardException(e, http_status_code=404, component='rgw') + + @allow_empty_body + # pylint: disable=W0613,W0102 + def set(self, zonegroup_name: str, realm_name: str, new_zonegroup_name: str, + default: str = '', master: str = '', zonegroup_endpoints: str = '', + add_zones: List[str] = [], remove_zones: List[str] = [], + placement_targets: List[Dict[str, str]] = []): + multisite_instance = RgwMultisite() + result = multisite_instance.edit_zonegroup(realm_name, zonegroup_name, new_zonegroup_name, + default, master, zonegroup_endpoints, add_zones, + remove_zones, placement_targets) + return result + + +@APIRouter('/rgw/zone', Scope.RGW) +class RgwZone(RESTController): + @allow_empty_body + # pylint: disable=W0613 + def create(self, zone_name, zonegroup_name=None, default=False, master=False, + zone_endpoints=None, access_key=None, secret_key=None): + multisite_instance = RgwMultisite() + result = multisite_instance.create_zone(zone_name, zonegroup_name, default, + master, zone_endpoints, access_key, + secret_key) + return result + + @allow_empty_body + # pylint: disable=W0613 + def list(self): + multisite_instance = RgwMultisite() + result = multisite_instance.list_zones() + return result + + @allow_empty_body + # pylint: disable=W0613 + def get(self, zone_name): + multisite_instance = RgwMultisite() + result = multisite_instance.get_zone(zone_name) + return result + + @Endpoint() + @ReadPermission + def get_all_zones_info(self): + multisite_instance = RgwMultisite() + result = multisite_instance.get_all_zones_info() + return result + + def delete(self, zone_name, delete_pools, pools: Optional[List[str]] = None, + zonegroup_name=None): + if pools is None: + pools = [] + if zonegroup_name is None: + zonegroup_name = '' + try: + multisite_instance = RgwMultisite() + result = multisite_instance.delete_zone(zone_name, delete_pools, pools, zonegroup_name) + return result + except NoRgwDaemonsException as e: + raise DashboardException(e, http_status_code=404, component='rgw') + + @allow_empty_body + # pylint: disable=W0613,W0102 + def set(self, zone_name: str, new_zone_name: str, zonegroup_name: str, default: str = '', + master: str = '', zone_endpoints: str = '', access_key: str = '', secret_key: str = '', + placement_target: str = '', data_pool: str = '', index_pool: str = '', + data_extra_pool: str = '', storage_class: str = '', data_pool_class: str = '', + compression: str = ''): + multisite_instance = RgwMultisite() + result = multisite_instance.edit_zone(zone_name, new_zone_name, zonegroup_name, default, + master, zone_endpoints, access_key, secret_key, + placement_target, data_pool, index_pool, + data_extra_pool, storage_class, data_pool_class, + compression) + return result + + @Endpoint() + @ReadPermission + def get_pool_names(self): + pool_names = [] + ret, out, _ = mgr.check_mon_command({ + 'prefix': 'osd lspools', + 'format': 'json', + }) + if ret == 0 and out is not None: + pool_names = json.loads(out) + return pool_names + + @Endpoint('PUT') + @CreatePermission + def create_system_user(self, userName: str, zoneName: str): + multisite_instance = RgwMultisite() + result = multisite_instance.create_system_user(userName, zoneName) + return result + + @Endpoint() + @ReadPermission + def get_user_list(self, zoneName=None): + multisite_instance = RgwMultisite() + result = multisite_instance.get_user_list(zoneName) + return result diff --git a/ceph/src/pybind/mgr/dashboard/frontend/.npmrc 
b/ceph/src/pybind/mgr/dashboard/frontend/.npmrc index ce9882e34..4fc3ee7e9 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/.npmrc +++ b/ceph/src/pybind/mgr/dashboard/frontend/.npmrc @@ -1,2 +1,3 @@ audit=false save-exact=true +legacy-peer-deps=true diff --git a/ceph/src/pybind/mgr/dashboard/frontend/CMakeLists.txt b/ceph/src/pybind/mgr/dashboard/frontend/CMakeLists.txt index f34cde6fb..2527ef23e 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/CMakeLists.txt +++ b/ceph/src/pybind/mgr/dashboard/frontend/CMakeLists.txt @@ -40,6 +40,7 @@ function(add_npm_options) npm config set ${key} ${value} --userconfig ${NC_NODEENV_DIR}/.npmrc && deactivate) endforeach() + set(npm_config_python ${MGR_PYTHON_EXECUTABLE}) add_custom_target(${NC_TARGET} ${commands} DEPENDS ${NC_NODEENV_DIR}/bin/npm @@ -63,7 +64,7 @@ else(WITH_SYSTEM_NPM) OUTPUT "${mgr-dashboard-nodeenv-dir}/bin/npm" COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${MGR_PYTHON_EXECUTABLE} ${mgr-dashboard-nodeenv-dir} COMMAND ${mgr-dashboard-nodeenv-dir}/bin/pip install nodeenv - COMMAND ${mgr-dashboard-nodeenv-dir}/bin/nodeenv --verbose ${node_mirror_opt} -p --node=14.15.1 + COMMAND ${mgr-dashboard-nodeenv-dir}/bin/nodeenv --verbose ${node_mirror_opt} -p --node=18.17.0 COMMAND mkdir ${mgr-dashboard-nodeenv-dir}/.npm WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "dashboard nodeenv is being installed") @@ -73,7 +74,6 @@ else(WITH_SYSTEM_NPM) add_npm_options( NODEENV_DIR ${mgr-dashboard-nodeenv-dir} TARGET mgr-dashboard-nodeenv - OPTION python=${MGR_PYTHON_EXECUTABLE} OPTION cache=${mgr-dashboard-nodeenv-dir}/.npm ${npm_registry_opts}) add_custom_target(mgr-dashboard-frontend-deps diff --git a/ceph/src/pybind/mgr/dashboard/frontend/angular.json b/ceph/src/pybind/mgr/dashboard/frontend/angular.json index 4bfab2baf..e1cb4c29f 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/angular.json +++ b/ceph/src/pybind/mgr/dashboard/frontend/angular.json @@ -274,7 +274,6 @@ "cli": {} } }, - "defaultProject": "ceph-dashboard", "schematics": { "@schematics/angular:component": { "prefix": "cd", @@ -286,6 +285,8 @@ }, "cli": { "analytics": false, - "defaultCollection": "@angular-eslint/schematics" + "schematicCollections": [ + "@angular-eslint/schematics" + ] } } diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress.config.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress.config.ts index 3948ea8db..fa3349883 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress.config.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress.config.ts @@ -1,4 +1,4 @@ -import { defineConfig } from 'cypress' +import { defineConfig } from 'cypress'; export default defineConfig({ video: true, @@ -9,18 +9,22 @@ export default defineConfig({ viewportWidth: 1920, projectId: 'k7ab29', reporter: 'cypress-multi-reporters', + reporterOptions: { reporterEnabled: 'spec, mocha-junit-reporter', mochaJunitReporterReporterOptions: { - mochaFile: 'cypress/reports/results-[hash].xml', - }, + mochaFile: 'cypress/reports/results-[hash].xml' + } }, + retries: 1, + env: { LOGIN_USER: 'admin', LOGIN_PWD: 'admin', - CEPH2_URL: 'https://localhost:4202/', + CEPH2_URL: 'https://localhost:4202/' }, + chromeWebSecurity: false, eyesIsDisabled: false, eyesFailCypressOnDiff: true, @@ -28,15 +32,24 @@ export default defineConfig({ eyesLegacyHooks: true, eyesTestConcurrency: 5, eyesPort: 35321, + e2e: { // We've imported your old cypress plugins here. // You may want to clean this up later by importing these. 
setupNodeEvents(on, config) { - return require('./cypress/plugins/index.js')(on, config) + return require('./cypress/plugins/index.js')(on, config); }, baseUrl: 'https://localhost:4200/', excludeSpecPattern: ['*.po.ts', '**/orchestrator/**'], experimentalSessionAndOrigin: true, - specPattern: 'cypress/e2e/**/*-spec.{js,jsx,ts,tsx}', + specPattern: 'cypress/e2e/**/*-spec.{js,jsx,ts,tsx,feature}' }, -}) + + component: { + devServer: { + framework: 'angular', + bundler: 'webpack' + }, + specPattern: '**/*.cy.ts' + } +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.po.ts index 59f311d64..f8f21ac22 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.po.ts @@ -162,6 +162,7 @@ export class HostsPageHelper extends PageHelper { drain(hostname: string) { this.getTableCell(this.columnIndex.hostname, hostname, true).click(); this.clickActionButton('start-drain'); + cy.wait(1000); this.checkLabelExists(hostname, ['_no_schedule'], true); this.clickTab('cd-host-details', hostname, 'Daemons'); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.po.ts index 7efd8a652..5c34eee5c 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.po.ts @@ -16,18 +16,21 @@ export class LogsPageHelper extends PageHelper { cy.contains('.nav-link', 'Audit Logs').click(); // Enter an earliest time so that no old messages with the same pool name show up - cy.get('.ngb-tp-input').its(0).clear(); + cy.get('.ngb-tp-input') + .its(0) + .then((input) => { + cy.wrap(input).clear(); - if (hour < 10) { - cy.get('.ngb-tp-input').its(0).type('0'); - } - cy.get('.ngb-tp-input').its(0).type(`${hour}`); + if (hour < 10) cy.wrap(input).type(`${hour}`); + }); - cy.get('.ngb-tp-input').its(1).clear(); - if (minute < 10) { - cy.get('.ngb-tp-input').its(1).type('0'); - } - cy.get('.ngb-tp-input').its(1).type(`${minute}`); + cy.get('.ngb-tp-input') + .its(1) + .then((input) => { + cy.wrap(input).clear(); + + if (minute < 10) cy.wrap(input).type(`${minute}`); + }); // Enter the pool name into the filter box cy.get('input.form-control.ng-valid').first().clear().type(poolname); @@ -46,17 +49,21 @@ export class LogsPageHelper extends PageHelper { cy.contains('.nav-link', 'Audit Logs').click(); // Enter an earliest time so that no old messages with the same config name show up - cy.get('.ngb-tp-input').its(0).clear(); - if (hour < 10) { - cy.get('.ngb-tp-input').its(0).type('0'); - } - cy.get('.ngb-tp-input').its(0).type(`${hour}`); - - cy.get('.ngb-tp-input').its(1).clear(); - if (minute < 10) { - cy.get('.ngb-tp-input').its(1).type('0'); - } - cy.get('.ngb-tp-input').its(1).type(`${minute}`); + cy.get('.ngb-tp-input') + .its(0) + .then((input) => { + cy.wrap(input).clear(); + + if (hour < 10) cy.wrap(input).type(`${hour}`); + }); + + cy.get('.ngb-tp-input') + .its(1) + .then((input) => { + cy.wrap(input).clear(); + + if (minute < 10) cy.wrap(input).type(`${minute}`); + }); // Enter the config name into the filter box cy.get('input.form-control.ng-valid').first().clear().type(configname); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/01-global.feature.po.ts 
b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/01-global.feature.po.ts deleted file mode 100644 index 4ddd11fdf..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/01-global.feature.po.ts +++ /dev/null @@ -1,187 +0,0 @@ -import { And, Given, Then, When } from 'cypress-cucumber-preprocessor/steps'; - -import { UrlsCollection } from './urls.po'; - -const urlsCollection = new UrlsCollection(); - -Given('I am logged in', () => { - cy.login(); -}); - -Given('I am on the {string} page', (page: string) => { - cy.visit(urlsCollection.pages[page].url); - cy.get(urlsCollection.pages[page].id).should('exist'); -}); - -Then('I should be on the {string} page', (page: string) => { - cy.get(urlsCollection.pages[page].id).should('exist'); -}); - -And('I should see a button to {string}', (button: string) => { - cy.get(`[aria-label="${button}"]`).should('be.visible'); -}); - -When('I click on {string} button', (button: string) => { - cy.get(`[aria-label="${button}"]`).first().click(); -}); - -// When you are clicking on an action in the table actions dropdown button -When('I click on {string} button from the table actions', (button: string) => { - cy.get('.table-actions button.dropdown-toggle').first().click(); - cy.get(`[aria-label="${button}"]`).first().click(); -}); - -And('select options {string}', (labels: string) => { - if (labels) { - cy.get('a[data-testid=select-menu-edit]').click(); - for (const label of labels.split(', ')) { - cy.get('.popover-body div.select-menu-item-content').contains(label).click(); - } - } -}); - -And('{string} option {string}', (action: string, labels: string) => { - if (labels) { - if (action === 'add') { - cy.get('cd-modal').find('.select-menu-edit').click(); - for (const label of labels.split(', ')) { - cy.get('.popover-body input').type(`${label}{enter}`); - } - } else { - for (const label of labels.split(', ')) { - cy.contains('cd-modal .badge', new RegExp(`^${label}$`)) - .find('.badge-remove') - .click(); - } - } - } -}); - -/** - * Fills in the given field using the value provided - * @param field ID of the field that needs to be filled out. - * @param value Value that should be filled in the field. - */ -And('enter {string} {string}', (field: string, value: string) => { - cy.get('cd-modal').within(() => { - cy.get(`input[id=${field}]`).type(value); - }); -}); - -And('I click on submit button', () => { - cy.get('[data-cy=submitBtn]').click(); -}); - -/** - * Selects any row on the datatable if it matches the given name - */ -When('I select a row {string}', (row: string) => { - cy.get('cd-table .search input').first().clear().type(row); - cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).click(); -}); - -Then('I should see the modal', () => { - cy.get('cd-modal').should('exist'); -}); - -Then('I should not see the modal', () => { - cy.get('cd-modal').should('not.exist'); -}); - -/** - * Some modals have an additional confirmation to be provided - * by ticking the 'Are you sure?' box. 
- */ -Then('I check the tick box in modal', () => { - cy.get('cd-modal .custom-control-label').click(); -}); - -And('I confirm to {string}', (action: string) => { - cy.contains('cd-modal button', action).click(); - cy.get('cd-modal').should('not.exist'); -}); - -Then('I should see an error in {string} field', (field: string) => { - cy.get('cd-modal').within(() => { - cy.get(`input[id=${field}]`).should('have.class', 'ng-invalid'); - }); -}); - -Then('I should see a row with {string}', (row: string) => { - cy.get('cd-table .search input').first().clear().type(row); - cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( - 'exist' - ); -}); - -Then('I should not see a row with {string}', (row: string) => { - cy.get('cd-table .search input').first().clear().type(row); - cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( - 'not.exist' - ); -}); - -Then('I should see rows with following entries', (entries) => { - entries.hashes().forEach((entry: any) => { - cy.get('cd-table .search input').first().clear().type(entry.hostname); - cy.contains( - `datatable-body-row datatable-body-cell .datatable-body-cell-label`, - entry.hostname - ).should('exist'); - }); -}); - -And('I should see row {string} have {string}', (row: string, options: string) => { - if (options) { - cy.get('cd-table .search input').first().clear().type(row); - for (const option of options.split(',')) { - cy.contains( - `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`, - option - ).should('exist'); - } - } -}); - -And('I should see row {string} does not have {string}', (row: string, options: string) => { - if (options) { - cy.get('cd-table .search input').first().clear().type(row); - for (const option of options.split(',')) { - cy.contains( - `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`, - option - ).should('not.exist'); - } - } -}); - -And('I go to the {string} tab', (names: string) => { - for (const name of names.split(', ')) { - cy.contains('.nav.nav-tabs a', name).click(); - } -}); - -And('select {string} {string}', (selectionName: string, option: string) => { - cy.get(`select[name=${selectionName}]`).select(option); - cy.get(`select[name=${selectionName}] option:checked`).contains(option); -}); - -When('I expand the row {string}', (row: string) => { - cy.contains('.datatable-body-row', row).first().find('.tc_expand-collapse').click(); -}); - -And('I should see row {string} have {string} on this tab', (row: string, options: string) => { - if (options) { - cy.get('cd-table').should('exist'); - cy.get('datatable-scroller, .empty-row'); - cy.get('.datatable-row-detail').within(() => { - cy.get('cd-table .search input').first().clear().type(row); - for (const option of options.split(',')) { - cy.contains( - `datatable-body-row datatable-body-cell .datatable-body-cell-label span`, - option - ).should('exist'); - } - }); - } -}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/forms-helper.feature.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/forms-helper.feature.po.ts new file mode 100644 index 000000000..2c14af863 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/forms-helper.feature.po.ts @@ -0,0 +1,77 @@ +import { And, Then } from 'cypress-cucumber-preprocessor/steps'; + +/** + * Fills in the given field using the value provided + * @param field ID of the field that needs to be filled out. 
+ * @param value Value that should be filled in the field. + */ +And('enter {string} {string}', (field: string, value: string) => { + cy.get('.cd-col-form').within(() => { + cy.get(`input[id=${field}]`).clear().type(value); + }); +}); + +/** + * Fills in the given field using the value provided + * @param field ID of the field that needs to be filled out. + * @param value Value that should be filled in the field. + */ +And('enter {string} {string} in the modal', (field: string, value: string) => { + cy.get('cd-modal').within(() => { + cy.get(`input[id=${field}]`).clear().type(value); + }); +}); + +And('select options {string}', (labels: string) => { + if (labels) { + cy.get('a[data-testid=select-menu-edit]').click(); + for (const label of labels.split(', ')) { + cy.get('.popover-body div.select-menu-item-content').contains(label).click(); + } + } +}); + +And('{string} option {string}', (action: string, labels: string) => { + if (labels) { + if (action === 'add') { + cy.get('cd-modal').find('.select-menu-edit').click(); + for (const label of labels.split(', ')) { + cy.get('.popover-body input').type(`${label}{enter}`); + } + } else { + for (const label of labels.split(', ')) { + cy.contains('cd-modal .badge', new RegExp(`^${label}$`)) + .find('.badge-remove') + .click(); + } + } + } +}); + +And('I click on submit button', () => { + cy.get('[data-cy=submitBtn]').click(); +}); + +/** + * Some modals have an additional confirmation to be provided + * by ticking the 'Are you sure?' box. + */ +Then('I check the tick box in modal', () => { + cy.get('cd-modal input#confirmation').click(); +}); + +And('I confirm to {string}', (action: string) => { + cy.contains('cd-modal button', action).click(); + cy.get('cd-modal').should('not.exist'); +}); + +Then('I should see an error in {string} field', (field: string) => { + cy.get('cd-modal').within(() => { + cy.get(`input[id=${field}]`).should('have.class', 'ng-invalid'); + }); +}); + +And('select {string} {string}', (selectionName: string, option: string) => { + cy.get(`select[name=${selectionName}]`).select(option); + cy.get(`select[name=${selectionName}] option:checked`).contains(option); +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/global.feature.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/global.feature.po.ts new file mode 100644 index 000000000..c6132ae3d --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/global.feature.po.ts @@ -0,0 +1,40 @@ +import { And, Given, Then, When } from 'cypress-cucumber-preprocessor/steps'; + +import { UrlsCollection } from './urls.po'; + +const urlsCollection = new UrlsCollection(); + +Given('I am logged in', () => { + cy.login(); +}); + +Given('I am on the {string} page', (page: string) => { + cy.visit(urlsCollection.pages[page].url); + cy.get(urlsCollection.pages[page].id).should('exist'); +}); + +Then('I should be on the {string} page', (page: string) => { + cy.get(urlsCollection.pages[page].id).should('exist'); +}); + +And('I should see a button to {string}', (button: string) => { + cy.get(`[aria-label="${button}"]`).should('be.visible'); +}); + +When('I click on {string} button', (button: string) => { + cy.get(`[aria-label="${button}"]`).first().click(); +}); + +Then('I should see the modal', () => { + cy.get('cd-modal').should('exist'); +}); + +Then('I should not see the modal', () => { + cy.get('cd-modal').should('not.exist'); +}); + +And('I go to the {string} tab', (names: string) => { + for (const name of names.split(', ')) { + 
cy.contains('.nav.nav-tabs a', name).click(); + } +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/table-helper.feature.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/table-helper.feature.po.ts new file mode 100644 index 000000000..82a2c7c35 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/table-helper.feature.po.ts @@ -0,0 +1,135 @@ +import { And, Then, When } from 'cypress-cucumber-preprocessor/steps'; + +// When you are clicking on an action in the table actions dropdown button +When('I click on {string} button from the table actions', (button: string) => { + cy.get('.table-actions button.dropdown-toggle').first().click(); + cy.get(`[aria-label="${button}"]`).first().click(); +}); + +// When you are clicking on an action inside the expanded table row +When('I click on {string} button from the expanded row', (button: string) => { + cy.get('.datatable-row-detail').within(() => { + cy.get('.table-actions button.dropdown-toggle').first().click(); + cy.get(`[aria-label="${button}"]`).first().click(); + }); +}); + +When('I click on {string} button from the table actions in the expanded row', (button: string) => { + cy.get('.datatable-row-detail').within(() => { + cy.get('.table-actions button.dropdown-toggle').first().click(); + cy.get(`[aria-label="${button}"]`).first().click(); + }); +}); + +When('I expand the row {string}', (row: string) => { + cy.contains('.datatable-body-row', row).first().find('.tc_expand-collapse').click(); +}); + +/** + * Selects any row on the datatable if it matches the given name + */ +When('I select a row {string}', (row: string) => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).click(); +}); + +When('I select a row {string} in the expanded row', (row: string) => { + cy.get('.datatable-row-detail').within(() => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).click(); + }); +}); + +Then('I should see a row with {string}', (row: string) => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'exist' + ); +}); + +Then('I should not see a row with {string}', (row: string) => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'not.exist' + ); +}); + +Then('I should not see a row with {string} in the expanded row', (row: string) => { + cy.get('.datatable-row-detail').within(() => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'not.exist' + ); + }); +}); + +Then('I should see rows with following entries', (entries) => { + entries.hashes().forEach((entry: any) => { + cy.get('cd-table .search input').first().clear().type(entry.hostname); + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label`, + entry.hostname + ).should('exist'); + }); +}); + +And('I should see row {string} have {string}', (row: string, options: string) => { + if (options) { + cy.get('cd-table .search input').first().clear().type(row); + for (const option of options.split(',')) { + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label 
.badge`, + option + ).should('exist'); + } + } +}); + +And('I should see row {string} of the expanded row to have a usage bar', (row: string) => { + cy.get('.datatable-row-detail').within(() => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'exist' + ); + cy.get('.datatable-body-row .datatable-body-cell .datatable-body-cell-label .progress').should( + 'exist' + ); + }); +}); + +And('I should see row {string} does not have {string}', (row: string, options: string) => { + if (options) { + cy.get('cd-table .search input').first().clear().type(row); + for (const option of options.split(',')) { + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`, + option + ).should('not.exist'); + } + } +}); + +Then('I should see a row with {string} in the expanded row', (row: string) => { + cy.get('.datatable-row-detail').within(() => { + cy.get('cd-table .search input').first().clear().type(row); + cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should( + 'exist' + ); + }); +}); + +And('I should see row {string} have {string} on this tab', (row: string, options: string) => { + if (options) { + cy.get('cd-table').should('exist'); + cy.get('datatable-scroller, .empty-row'); + cy.get('.datatable-row-detail').within(() => { + cy.get('cd-table .search input').first().clear().type(row); + for (const option of options.split(',')) { + cy.contains( + `datatable-body-row datatable-body-cell .datatable-body-cell-label span`, + option + ).should('exist'); + } + }); + } +}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/urls.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/urls.po.ts index 286355085..6f7316f98 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/urls.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/urls.po.ts @@ -39,6 +39,10 @@ export class UrlsCollection extends PageHelper { logs: { url: '#/logs', id: 'cd-logs' }, // RGW Daemons - 'rgw daemons': { url: '#/rgw/daemon', id: 'cd-rgw-daemon-list' } + 'rgw daemons': { url: '#/rgw/daemon', id: 'cd-rgw-daemon-list' }, + + // CephFS + cephfs: { url: '#/cephfs', id: 'cd-cephfs-list' }, + 'create cephfs': { url: '#/cephfs/create', id: 'cd-cephfs-form' } }; } diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.feature b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.feature new file mode 100644 index 000000000..2c08fb56e --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.feature @@ -0,0 +1,30 @@ +Feature: CephFS Management + + Goal: To test out the CephFS management features + + Background: Login + Given I am logged in + + Scenario: Create a CephFS Volume + Given I am on the "cephfs" page + And I click on "Create" button + And enter "name" "test_cephfs" + And I click on "Create File System" button + Then I should see a row with "test_cephfs" + + Scenario: Edit CephFS Volume + Given I am on the "cephfs" page + And I select a row "test_cephfs" + And I click on "Edit" button + And enter "name" "test_cephfs_edit" + And I click on "Edit File System" button + Then I should see a row with "test_cephfs_edit" + + Scenario: Remove CephFS Volume + Given I am on the "cephfs" page + And I select a row "test_cephfs_edit" + And I click on "Remove" button from the table actions 
+ Then I should see the modal + And I check the tick box in modal + And I click on "Remove File System" button + Then I should not see a row with "test_cephfs_edit" diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.ts deleted file mode 100644 index de66a005a..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { FilesystemsPageHelper } from './filesystems.po'; - -describe('File Systems page', () => { - const filesystems = new FilesystemsPageHelper(); - - beforeEach(() => { - cy.login(); - filesystems.navigateTo(); - }); - - describe('breadcrumb test', () => { - it('should open and show breadcrumb', () => { - filesystems.expectBreadcrumbText('File Systems'); - }); - }); -}); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.po.ts deleted file mode 100644 index bd6e5b8b7..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.po.ts +++ /dev/null @@ -1,5 +0,0 @@ -import { PageHelper } from '../page-helper.po'; - -export class FilesystemsPageHelper extends PageHelper { - pages = { index: { url: '#/cephfs', id: 'cd-cephfs-list' } }; -} diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolume-groups.e2e-spec.feature b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolume-groups.e2e-spec.feature new file mode 100644 index 000000000..66e3f726a --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolume-groups.e2e-spec.feature @@ -0,0 +1,51 @@ +Feature: CephFS Subvolume Group management + + Goal: To test out the CephFS subvolume group management features + + Background: Login + Given I am logged in + + Scenario: Create a CephFS Volume + Given I am on the "cephfs" page + And I click on "Create" button + And enter "name" "test_cephfs" + And I click on "Create File System" button + Then I should see a row with "test_cephfs" + + Scenario: Create a CephFS Subvolume Group + Given I am on the "cephfs" page + When I expand the row "test_cephfs" + And I go to the "Subvolume groups" tab + And I click on "Create" button from the expanded row + And enter "subvolumegroupName" "test_subvolume_group" in the modal + And I click on "Create Subvolume group" button + Then I should see a row with "test_subvolume_group" in the expanded row + + Scenario: Edit a CephFS Subvolume + Given I am on the "cephfs" page + When I expand the row "test_cephfs" + And I go to the "Subvolume groups" tab + When I select a row "test_subvolume_group" in the expanded row + And I click on "Edit" button from the table actions in the expanded row + And enter "size" "1" in the modal + And I click on "Edit Subvolume group" button + Then I should see row "test_subvolume_group" of the expanded row to have a usage bar + + Scenario: Remove a CephFS Subvolume + Given I am on the "cephfs" page + When I expand the row "test_cephfs" + And I go to the "Subvolume groups" tab + When I select a row "test_subvolume_group" in the expanded row + And I click on "Remove" button from the table actions in the expanded row + And I check the tick box in modal + And I click on "Remove subvolume group" button + Then I should not see a row with "test_subvolume_group" in the expanded row + + Scenario: Remove 
CephFS Volume + Given I am on the "cephfs" page + And I select a row "test_cephfs" + And I click on "Remove" button from the table actions + Then I should see the modal + And I check the tick box in modal + And I click on "Remove File System" button + Then I should not see a row with "test_cephfs_edit" diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolumes.e2e-spec.feature b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolumes.e2e-spec.feature new file mode 100644 index 000000000..ae968d4e9 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/subvolumes.e2e-spec.feature @@ -0,0 +1,51 @@ +Feature: CephFS Subvolume management + + Goal: To test out the CephFS subvolume management features + + Background: Login + Given I am logged in + + Scenario: Create a CephFS Volume + Given I am on the "cephfs" page + And I click on "Create" button + And enter "name" "test_cephfs" + And I click on "Create File System" button + Then I should see a row with "test_cephfs" + + Scenario: Create a CephFS Subvolume + Given I am on the "cephfs" page + When I expand the row "test_cephfs" + And I go to the "Subvolumes" tab + And I click on "Create" button from the expanded row + And enter "subvolumeName" "test_subvolume" in the modal + And I click on "Create Subvolume" button + Then I should see a row with "test_subvolume" in the expanded row + + Scenario: Edit a CephFS Subvolume + Given I am on the "cephfs" page + When I expand the row "test_cephfs" + And I go to the "Subvolumes" tab + When I select a row "test_subvolume" in the expanded row + And I click on "Edit" button from the table actions in the expanded row + And enter "size" "1" in the modal + And I click on "Edit Subvolume" button + Then I should see row "test_subvolume" of the expanded row to have a usage bar + + Scenario: Remove a CephFS Subvolume + Given I am on the "cephfs" page + When I expand the row "test_cephfs" + And I go to the "Subvolumes" tab + When I select a row "test_subvolume" in the expanded row + And I click on "Remove" button from the table actions in the expanded row + And I check the tick box in modal + And I click on "Remove Subvolume" button + Then I should not see a row with "test_subvolume" in the expanded row + + Scenario: Remove CephFS Volume + Given I am on the "cephfs" page + And I select a row "test_cephfs" + And I click on "Remove" button from the table actions + Then I should see the modal + And I check the tick box in modal + And I click on "Remove File System" button + Then I should not see a row with "test_cephfs_edit" diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/01-hosts.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/01-hosts.e2e-spec.ts index 8ad2a1daf..0afe0d74b 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/01-hosts.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/01-hosts.e2e-spec.ts @@ -57,29 +57,5 @@ describe('Hosts page', () => { hosts.editLabels(hostname, labels, true); hosts.editLabels(hostname, labels, false); }); - - it('should enter host into maintenance', function () { - const hostname = Cypress._.sample(this.hosts).name; - const serviceList = new Array(); - this.services.forEach((service: any) => { - if (hostname === service.hostname) { - serviceList.push(service.daemon_type); - } - }); - let enterMaintenance = true; - serviceList.forEach((service: string) => { - if (service === 'mgr' || service === 
'alertmanager') { - enterMaintenance = false; - } - }); - if (enterMaintenance) { - hosts.maintenance(hostname); - } - }); - - it('should exit host from maintenance', function () { - const hostname = Cypress._.sample(this.hosts).name; - hosts.maintenance(hostname, true); - }); }); }); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/02-create-cluster-add-host.feature b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/02-create-cluster-add-host.feature index be49fcba0..ddbfd31a3 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/02-create-cluster-add-host.feature +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/02-create-cluster-add-host.feature @@ -12,7 +12,7 @@ Feature: Cluster expansion host addition Scenario Outline: Add hosts Given I am on the "Add Hosts" section When I click on "Add" button - And enter "hostname" "" + And enter "hostname" "" in the modal And select options "" And I click on "Add Host" button Then I should not see the modal @@ -43,7 +43,7 @@ Feature: Cluster expansion host addition Scenario: Add hosts using pattern 'ceph-node-[01-02]' Given I am on the "Add Hosts" section When I click on "Add" button - And enter "hostname" "ceph-node-[01-02]" + And enter "hostname" "ceph-node-[01-02]" in the modal And I click on "Add Host" button Then I should not see the modal And I should see rows with following entries @@ -55,7 +55,7 @@ Feature: Cluster expansion host addition Given I am on the "Add Hosts" section And I should see a row with "ceph-node-00" When I click on "Add" button - And enter "hostname" "ceph-node-00" + And enter "hostname" "ceph-node-00" in the modal Then I should see an error in "hostname" field Scenario Outline: Add and remove labels on host diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.po.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.po.ts index a27be3c6b..47b0639bc 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.po.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.po.ts @@ -10,6 +10,11 @@ export class BucketsPageHelper extends PageHelper { pages = pages; + columnIndex = { + name: 3, + owner: 4 + }; + versioningStateEnabled = 'Enabled'; versioningStateSuspended = 'Suspended'; @@ -73,18 +78,22 @@ export class BucketsPageHelper extends PageHelper { cy.get('input[id=versioning]').should('be.disabled'); cy.contains('button', 'Edit Bucket').click(); + this.getTableCell(this.columnIndex.name, name) + .parent() + .find(`datatable-body-cell:nth-child(${this.columnIndex.owner})`) + .should(($elements) => { + const bucketName = $elements.text(); + expect(bucketName).to.eq(new_owner); + }); + // wait to be back on buckets page with table visible and click this.getExpandCollapseElement(name).click(); // check its details table for edited owner field - cy.get('.table.table-striped.table-bordered') - .first() - .should('contains.text', new_owner) - .as('bucketDataTable'); + cy.get('.table.table-striped.table-bordered').first().as('bucketDataTable'); // Check versioning enabled: - cy.get('@bucketDataTable').find('tr').its(2).find('td').last().should('have.text', new_owner); - cy.get('@bucketDataTable').find('tr').its(11).find('td').last().as('versioningValueCell'); + cy.get('@bucketDataTable').find('tr').its(0).find('td').last().as('versioningValueCell'); return cy.get('@versioningValueCell').should('have.text', this.versioningStateEnabled); } @@ 
-92,21 +101,23 @@ export class BucketsPageHelper extends PageHelper { cy.get('input[id=versioning]').should('not.be.checked'); cy.get('label[for=versioning]').click(); cy.get('input[id=versioning]').should('be.checked'); - cy.contains('button', 'Edit Bucket').click(); + // Check if the owner is updated + this.getTableCell(this.columnIndex.name, name) + .parent() + .find(`datatable-body-cell:nth-child(${this.columnIndex.owner})`) + .should(($elements) => { + const bucketName = $elements.text(); + expect(bucketName).to.eq(new_owner); + }); + // wait to be back on buckets page with table visible and click this.getExpandCollapseElement(name).click(); - // check its details table for edited owner field - cy.get('.table.table-striped.table-bordered') - .first() - .should('contains.text', new_owner) - .as('bucketDataTable'); - // Check versioning enabled: - cy.get('@bucketDataTable').find('tr').its(2).find('td').last().should('have.text', new_owner); - cy.get('@bucketDataTable').find('tr').its(11).find('td').last().as('versioningValueCell'); + cy.get('.table.table-striped.table-bordered').first().as('bucketDataTable'); + cy.get('@bucketDataTable').find('tr').its(0).find('td').last().as('versioningValueCell'); cy.get('@versioningValueCell').should('have.text', this.versioningStateEnabled); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard-v3.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard-v3.e2e-spec.ts index 80ea7c325..3815011a1 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard-v3.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/dashboard-v3.e2e-spec.ts @@ -39,7 +39,7 @@ describe('Dashboard-v3 Main Page', () => { it('should verify that cards exist on dashboard in proper order', () => { // Ensures that cards are all displayed on the dashboard tab while being in the proper // order, checks for card title and position via indexing into a list of all cards. - const order = ['Details', 'Status', 'Capacity', 'Inventory', 'Cluster utilization']; + const order = ['Details', 'Inventory', 'Status', 'Capacity', 'Cluster Utilization']; for (let i = 0; i < order.length; i++) { dashboard.card(i).should('contain.text', order[i]); diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/notification.e2e-spec.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/notification.e2e-spec.ts index b69f26f58..0a25d7e86 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/notification.e2e-spec.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/e2e/ui/notification.e2e-spec.ts @@ -38,7 +38,7 @@ describe('Notification page', () => { notification.getTasks().contains(poolName).should('exist'); // Delete pool after task is complete (otherwise we get an error). 
- notification.getTasks().contains(poolName, { timeout: 300000 }).should('not.exist'); + notification.getTasks().should('not.exist'); }); it('should have notifications', () => { diff --git a/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts b/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts index 2ab1b5002..09a2788eb 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts +++ b/ceph/src/pybind/mgr/dashboard/frontend/cypress/support/commands.ts @@ -86,8 +86,14 @@ function requestAuth(username: string, password: string, url = '') { } // @ts-ignore -Cypress.Commands.add('text', { prevSubject: true }, (subject: any) => { - return subject.text(); +Cypress.Commands.add('text', { prevSubject: true }, ($element: JQuery) => { + cy.wrap($element).scrollIntoView(); + return cy + .wrap($element) + .invoke('text') + .then((text: string) => { + return text.toString(); + }); }); Cypress.Commands.add('logToConsole', (message: string, optional?: any) => { diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/119.066087561586659c.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/119.066087561586659c.js new file mode 100644 index 000000000..6ff8073c1 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/119.066087561586659c.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[119],{22119:(xn,Ae,r)=>{r.r(Ae),r.d(Ae,{PoolModule:()=>qe,RoutedPoolModule:()=>Dn});var C=r(88692),l=r(20092),Oe=r(54247),f=r(51389),M=r(79512),f_=r(44466),E_=r(39025),g_=r(370),p_=r(23815),g=r.n(p_),R_=r(7357),m_=r(26504),ue=r(80842);class T{constructor(){this.nodes=[],this.idTree={},this.allDevices=[],this.buckets=[],this.failureDomains={},this.failureDomainKeys=[],this.devices=[],this.deviceCount=0}static searchFailureDomains(n,_){return this.getFailureDomains(this.search(n,_))}static search(n,_){const[o,i]=_.split("~"),s=n.find(c=>["name","id","type"].some(d=>c[d]===o));return s?(n=this.getSubNodes(s,this.createIdTreeFromNodes(n)),i&&(n=this.filterNodesByDeviceType(n,i)),n):[]}static createIdTreeFromNodes(n){const _={};return n.forEach(o=>{_[o.id]=o}),_}static getSubNodes(n,_){let o=[n];return n.children&&n.children.forEach(i=>{o=o.concat(this.getSubNodes(_[i],_))}),o}static filterNodesByDeviceType(n,_){let i,o=n.filter(c=>c.device_class&&c.device_class!==_).map(c=>c.id),s=o;do{i=!1,n=n.filter(d=>!o.includes(d.id));const c=[];n.forEach(d=>{d.children&&d.children.every(P=>o.includes(P))&&(c.push(d.id),i=!0)}),i&&(o=c,s=s.concat(c))}while(i);return(n=g().cloneDeep(n)).map(c=>(c.children&&(c.children=c.children.filter(d=>!s.includes(d))),c))}static getFailureDomains(n){const _={};return n.forEach(o=>{const i=o.type;_[i]||(_[i]=[]),_[i].push(o)}),_}initCrushNodeSelection(n,_,o,i){this.nodes=n,this.idTree=T.createIdTreeFromNodes(n),n.forEach(s=>{this.idTree[s.id]=s}),this.buckets=g().sortBy(n.filter(s=>s.children),"name"),this.controls={root:_,failure:o,device:i},this.preSelectRoot(),this.controls.root.valueChanges.subscribe(()=>this.onRootChange()),this.controls.failure.valueChanges.subscribe(()=>this.onFailureDomainChange()),this.controls.device.valueChanges.subscribe(()=>this.onDeviceChange())}preSelectRoot(){const n=this.nodes.find(_=>"root"===_.type);this.silentSet(this.controls.root,n),this.onRootChange()}silentSet(n,_){n.setValue(_,{emitEvent:!1})}onRootChange(){const 
n=T.getSubNodes(this.controls.root.value,this.idTree),_=T.getFailureDomains(n);Object.keys(_).forEach(o=>{_[o].length<=1&&delete _[o]}),this.failureDomains=_,this.failureDomainKeys=Object.keys(_).sort(),this.updateFailureDomain()}updateFailureDomain(){let n=this.getIncludedCustomValue(this.controls.failure,Object.keys(this.failureDomains));""===n&&(n=this.setMostCommonDomain(this.controls.failure)),this.updateDevices(n)}getIncludedCustomValue(n,_){return n.dirty&&_.includes(n.value)?n.value:""}setMostCommonDomain(n){let _={n:0,type:""};return Object.keys(this.failureDomains).forEach(o=>{const i=this.failureDomains[o].length;_.nT.getSubNodes(i,this.idTree)));this.allDevices=_.filter(i=>i.device_class).map(i=>i.device_class),this.devices=g().uniq(this.allDevices).sort();const o=1===this.devices.length?this.devices[0]:this.getIncludedCustomValue(this.controls.device,this.devices);this.silentSet(this.controls.device,o),this.onDeviceChange(o)}onDeviceChange(n=this.controls.device.value){this.deviceCount=""===n?this.allDevices.length:this.allDevices.filter(_=>_===n).length}}var Fe=r(30982),C_=r(14745),b=r(65862),M_=r(93614),Ne=r(95463),E=r(90070),h_=r(30633),v=r(76111),S_=r(47557),T_=r(28211),de=r(32337),e=r(64537),be=r(62862),ve=r(83608),Pe=r(18372),$e=r(60312),fe=r(30839),Ee=r(82945),ge=r(87925),pe=r(94276),Re=r(56310),me=r(41582),Ce=r(10545);function L_(t,n){1&t&&(e.TgZ(0,"span",30),e.SDv(1,31),e.qZA())}function A_(t,n){1&t&&(e.TgZ(0,"span",30),e.SDv(1,32),e.qZA())}function F_(t,n){1&t&&(e.TgZ(0,"span",30),e.SDv(1,33),e.qZA())}function N_(t,n){1&t&&(e.TgZ(0,"option",26),e.SDv(1,34),e.qZA())}function b_(t,n){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function v_(t,n){1&t&&(e.TgZ(0,"span",30),e.SDv(1,36),e.qZA())}function $_(t,n){1&t&&(e.TgZ(0,"option",26),e.SDv(1,37),e.qZA())}function I_(t,n){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function D_(t,n){1&t&&(e.TgZ(0,"span",30),e.SDv(1,38),e.qZA())}function x_(t,n){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let y_=(()=>{class t extends T{constructor(_,o,i,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=i,this.crushRuleService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.crushRuleService.formTooltips,this.action=this.actionLabels.CREATE,this.resource="Crush Rule",this.createForm()}createForm(){this.form=this.formBuilder.group({name:["",[l.kI.required,l.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],root:null,failure_domain:"",device_class:""})}ngOnInit(){this.crushRuleService.getInfo().subscribe(({names:_,nodes:o})=>{this.initCrushNodeSelection(o,this.form.get("root"),this.form.get("failure_domain"),this.form.get("device_class")),this.names=_})}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=g().cloneDeep(this.form.value);_.root=_.root.name,""===_.device_class&&delete _.device_class,this.taskWrapper.wrapTaskAroundCall({task:new v.R("crushRule/create",_),call:this.crushRuleService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(be.O),e.Y36(f.Kz),e.Y36(de.P),e.Y36(ve.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-crush-rule-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:55,vars:27,consts:function(){let n,_,o,i,s,c,d,P,p,R,h,S,m;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Root",i="Failure domain type",s="Device class",c="Let Ceph decide",d="This field is required!",P="The name can only consist of alphanumeric characters, dashes and underscores.",p="The chosen erasure code profile name is already in use.",R="Loading...",h="This field is required!",S="Loading...",m="This field is required!",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"required"],[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","root",1,"cd-col-form-label"],o,[3,"html"],["id","root","name","root","formControlName","root",1,"form-select"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","failure_domain",1,"cd-col-form-label"],i,["id","failure_domain","name","failure_domain","formControlName","failure_domain",1,"form-select"],["for","device_class",1,"cd-col-form-label"],s,["id","device_class","name","device_class","formControlName","device_class",1,"form-select"],["ngValue",""],c,[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,P,p,R,[3,"ngValue"],h,S,m]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.ynx(11),e.SDv(12,9),e.BQk(),e._UZ(13,"span",10),e.qZA(),e.TgZ(14,"div",11),e._UZ(15,"input",12),e.YNc(16,L_,2,0,"span",13),e.YNc(17,A_,2,0,"span",13),e.YNc(18,F_,2,0,"span",13),e.qZA()(),e.TgZ(19,"div",7)(20,"label",14),e.ynx(21),e.SDv(22,15),e.BQk(),e._UZ(23,"cd-helper",16)(24,"span",10),e.qZA(),e.TgZ(25,"div",11)(26,"select",17),e.YNc(27,N_,2,0,"option",18),e.YNc(28,b_,2,2,"option",19),e.qZA(),e.YNc(29,v_,2,0,"span",13),e.qZA()(),e.TgZ(30,"div",7)(31,"label",20),e.ynx(32),e.SDv(33,21),e.BQk(),e._UZ(34,"cd-helper",16)(35,"span",10),e.qZA(),e.TgZ(36,"div",11)(37,"select",22),e.YNc(38,$_,2,0,"option",18),e.YNc(39,I_,2,3,"option",19),e.qZA(),e.YNc(40,D_,2,0,"span",13),e.qZA()(),e.TgZ(41,"div",7)(42,"label",23),e.ynx(43),e.SDv(44,24),e.BQk(),e._UZ(45,"cd-helper",16),e.qZA(),e.TgZ(46,"div",11)(47,"select",25)(48,"option",26),e.SDv(49,27),e.qZA(),e.YNc(50,x_,2,2,"option",19),e.qZA()()()(),e.TgZ(51,"div",28)(52,"cd-form-button-panel",29),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(53,"titlecase"),e.ALo(54,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const 
i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,19,o.action))(e.lcZ(4,21,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(10),e.Q6J("ngIf",o.form.showError("name",i,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",i,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",i,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.root),e.xp6(4),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(1),e.Q6J("ngIf",o.form.showError("root",i,"required")),e.xp6(5),e.Q6J("html",o.tooltips.failure_domain),e.xp6(4),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.form.showError("failure_domain",i,"required")),e.xp6(5),e.Q6J("html",o.tooltips.device_class),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(53,23,o.action)+" "+e.lcZ(54,25,o.resource))}},dependencies:[C.sg,C.O5,Pe.S,$e.z,fe.p,Ee.U,ge.o,pe.b,Re.P,me.V,l._Y,l.YN,l.Kr,l.Fj,l.EJ,l.JJ,l.JL,l.sg,l.u,C.rS,Ce.m]}),t})();class Z_{}var U_=r(35732);let Me=(()=>{class t{constructor(_){this.http=_,this.apiPath="api/erasure_code_profile",this.formTooltips={k:"Each object is split in data-chunks parts, each stored on a different OSD.",m:"Compute coding chunks for each object and store them on different OSDs.\n The number of coding chunks is also the number of OSDs that can be down without losing data.",plugins:{jerasure:{description:"The jerasure plugin is the most generic and flexible plugin,\n it is also the default for Ceph erasure coded pools.",technique:"The more flexible technique is reed_sol_van : it is enough to set k\n and m. The cauchy_good technique can be faster but you need to chose the packetsize\n carefully. All of reed_sol_r6_op, liberation, blaum_roth, liber8tion are RAID6 equivalents\n in the sense that they can only be configured with m=2.",packetSize:"The encoding will be done on packets of bytes size at a time.\n Choosing the right packet size is difficult.\n The jerasure documentation contains extensive information on this topic."},lrc:{description:"With the jerasure plugin, when an erasure coded object is stored on\n multiple OSDs, recovering from the loss of one OSD requires reading from all the others.\n For instance if jerasure is configured with k=8 and m=4, losing one OSD requires reading\n from the eleven others to repair.\n\n The lrc erasure code plugin creates local parity chunks to be able to recover using\n less OSDs. For instance if lrc is configured with k=8, m=4 and l=4, it will create\n an additional parity chunk for every four OSDs. When a single OSD is lost, it can be\n recovered with only four OSDs instead of eleven.",l:"Group the coding and data chunks into sets of size locality. For instance,\n for k=4 and m=2, when locality=3 two groups of three are created. Each set can\n be recovered without reading chunks from another set.",crushLocality:"The type of the crush bucket in which each set of chunks defined\n by l will be stored. For instance, if it is set to rack, each group of l chunks will be\n placed in a different rack. It is used to create a CRUSH rule step such as step choose\n rack. If it is not set, no such grouping is done."},isa:{description:"The isa plugin encapsulates the ISA library. 
It only runs on Intel processors.",technique:"The ISA plugin comes in two Reed Solomon forms.\n If reed_sol_van is set, it is Vandermonde, if cauchy is set, it is Cauchy."},shec:{description:"The shec plugin encapsulates the multiple SHEC library.\n It allows ceph to recover data more efficiently than Reed Solomon codes.",c:"The number of parity chunks each of which includes each data chunk in its\n calculation range. The number is used as a durability estimator. For instance, if c=2,\n 2 OSDs can be down without losing data."},clay:{description:"CLAY (short for coupled-layer) codes are erasure codes designed to\n bring about significant savings in terms of network bandwidth and disk IO when a failed\n node/OSD/rack is being repaired.",d:"Number of OSDs requested to send data during recovery of a single chunk.\n d needs to be chosen such that k+1 <= d <= k+m-1. The larger the d, the better\n the savings.",scalar_mds:"scalar_mds specifies the plugin that is used as a building block\n in the layered construction. It can be one of jerasure, isa, shec.",technique:"technique specifies the technique that will be picked\n within the 'scalar_mds' plugin specified. Supported techniques\n are 'reed_sol_van', 'reed_sol_r6_op', 'cauchy_orig',\n 'cauchy_good', 'liber8tion' for jerasure, 'reed_sol_van',\n 'cauchy' for isa and 'single', 'multiple' for shec."}},crushRoot:"The name of the crush bucket used for the first step of the CRUSH rule.\n For instance step take default.",crushFailureDomain:"Ensure that no two chunks are in a bucket with the same failure\n domain. For instance, if the failure domain is host no two chunks will be stored on the same\n host. It is used to create a CRUSH rule step such as step chooseleaf host.",crushDeviceClass:"Restrict placement to devices of a specific class\n (e.g., ssd or hdd), using the crush device class names in the CRUSH map.",directory:"Set the directory name from which the erasure code plugin is loaded."}}list(){return this.http.get(this.apiPath)}create(_){return this.http.post(this.apiPath,_,{observe:"response"})}delete(_){return this.http.delete(`${this.apiPath}/${_}`,{observe:"response"})}getInfo(){return this.http.get(`ui-${this.apiPath}/info`)}}return t.\u0275fac=function(_){return new(_||t)(e.LFG(U_.eN))},t.\u0275prov=e.Yz7({token:t,factory:t.\u0275fac,providedIn:"root"}),t})();function G_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,47),e.qZA())}function H_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,48),e.qZA())}function z_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,49),e.qZA())}function q_(t,n){1&t&&(e.TgZ(0,"option",37),e.SDv(1,50),e.qZA())}function X_(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function Q_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,52),e.qZA())}function w_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,53),e.qZA())}function J_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,54),e.qZA())}function k_(t,n){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,55),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function V_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,56),e.qZA())}function Y_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,57),e.qZA())}function B_(t,n){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,58),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.lrcMultiK),e.QtT(1)}}function j_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,59),e.qZA())}function K_(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,60),e.qZA())}function W_(t,n){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,61),e.qZA()),2&t){const 
_=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function eo(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,65),e.qZA())}function _o(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,66),e.qZA())}function oo(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",62)(2,"span",14),e.SDv(3,63),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",64),e.YNc(7,eo,2,0,"span",12),e.YNc(8,_o,2,0,"span",12),e.qZA()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.shec.c),e.xp6(3),e.Q6J("ngIf",_.form.showError("c",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("c",o,"cGreaterM"))}}function to(t,n){1&t&&(e.TgZ(0,"span",39),e.SDv(1,74),e.qZA())}function no(t,n){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,75),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMin())(_.getDMax()),e.QtT(1)}}function io(t,n){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,76),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function so(t,n){if(1&t&&(e.ynx(0),e.YNc(1,no,2,2,"span",23),e.YNc(2,io,2,1,"span",23),e.BQk()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("ngIf",_.getDMin()<_.getDMax()),e.xp6(1),e.Q6J("ngIf",_.getDMin()===_.getDMax())}}function ao(t,n){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,77),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMin()),e.QtT(1)}}function lo(t,n){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,78),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function ro(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",7)(1,"label",67)(2,"span",14),e.SDv(3,68),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"div",69),e._UZ(7,"input",70),e.TgZ(8,"button",71),e.NdJ("click",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.toggleDCalc())}),e._UZ(9,"i",72),e.qZA()(),e.YNc(10,to,2,0,"span",23),e.YNc(11,so,3,2,"ng-container",73),e.YNc(12,ao,2,1,"span",12),e.YNc(13,lo,2,1,"span",12),e.qZA()()}if(2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.d),e.xp6(5),e.Q6J("ngClass",_.dCalc?_.icons.unlock:_.icons.lock),e.xp6(1),e.Q6J("ngIf",_.dCalc),e.xp6(1),e.Q6J("ngIf",!_.dCalc),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMin")),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMax"))}}function co(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,83),e.qZA())}function Oo(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,84),e.qZA())}function uo(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,85),e.qZA())}function Po(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",79)(2,"span",14),e.SDv(3,80),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",81),e.YNc(7,co,2,0,"span",12),e.YNc(8,Oo,2,0,"span",12),e.YNc(9,uo,2,0,"span",12),e.TgZ(10,"span",39),e.SDv(11,82),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.l),e.xp6(3),e.Q6J("ngIf",_.form.showError("l",o,"required")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"unequal")),e.xp6(2),e.pQV(_.lrcGroups),e.QtT(11)}}function fo(t,n){1&t&&(e.TgZ(0,"option",37),e.SDv(1,86),e.qZA())}function Eo(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function go(t,n){1&t&&(e.TgZ(0,"option",37),e.SDv(1,90),e.qZA())}function po(t,n){1&t&&(e.TgZ(0,"option",37),e.SDv(1,91),e.qZA())}function Ro(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(2);e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function 
mo(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",87),e.ynx(2),e.SDv(3,88),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"select",89),e.YNc(7,go,2,0,"option",18),e.YNc(8,po,2,0,"option",18),e.YNc(9,Ro,2,3,"option",19),e.qZA()()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.crushLocality),e.xp6(3),e.Q6J("ngIf",!_.failureDomains),e.xp6(1),e.Q6J("ngIf",_.failureDomainKeys.length>0),e.xp6(1),e.Q6J("ngForOf",_.failureDomainKeys)}}function Co(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}const Ie=function(t,n,_){return[t,n,_]};function Mo(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",92),e.ynx(2),e.SDv(3,93),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"select",94),e.YNc(7,Co,2,2,"option",19),e.qZA()()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.scalar_mds),e.xp6(3),e.Q6J("ngForOf",e.kEZ(2,Ie,_.PLUGIN.JERASURE,_.PLUGIN.ISA,_.PLUGIN.SHEC))}}function ho(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function So(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",95),e.ynx(2),e.SDv(3,96),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"select",97),e.YNc(7,ho,2,2,"option",19),e.qZA()()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins[_.plugin].technique),e.xp6(3),e.Q6J("ngForOf",_.techniques)}}function To(t,n){1&t&&(e.TgZ(0,"span",46),e.SDv(1,101),e.qZA())}function Lo(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",98),e.ynx(2),e.SDv(3,99),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",100),e.YNc(7,To,2,0,"span",12),e.qZA()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.jerasure.packetSize),e.xp6(3),e.Q6J("ngIf",_.form.showError("packetSize",o,"min"))}}function Ao(t,n){1&t&&(e.TgZ(0,"option",37),e.SDv(1,102),e.qZA())}function Fo(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function No(t,n){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let bo=(()=>{class t extends T{constructor(_,o,i,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=i,this.ecpService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.ecpService.formTooltips,this.PLUGIN={LRC:"lrc",SHEC:"shec",CLAY:"clay",JERASURE:"jerasure",ISA:"isa"},this.plugin=this.PLUGIN.JERASURE,this.icons=b.P,this.action=this.actionLabels.CREATE,this.resource="EC 
Profile",this.createForm(),this.setJerasureDefaults()}createForm(){this.form=this.formBuilder.group({name:[null,[l.kI.required,l.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],plugin:[this.PLUGIN.JERASURE,[l.kI.required]],k:[4,[l.kI.required,E.h.custom("max",()=>this.baseValueValidation(!0)),E.h.custom("unequal",_=>this.lrcDataValidation(_)),E.h.custom("kLowerM",_=>this.shecDataValidation(_))]],m:[2,[l.kI.required,E.h.custom("max",()=>this.baseValueValidation())]],crushFailureDomain:"",crushRoot:null,crushDeviceClass:"",directory:"",technique:"reed_sol_van",packetSize:[2048],l:[3,[l.kI.required,E.h.custom("unequal",_=>this.lrcLocalityValidation(_))]],crushLocality:"",c:[2,[l.kI.required,E.h.custom("cGreaterM",_=>this.shecDurabilityValidation(_))]],d:[5,[l.kI.required,E.h.custom("dMin",_=>this.dMinValidation(_)),E.h.custom("dMax",_=>this.dMaxValidation(_))]],scalar_mds:[this.PLUGIN.JERASURE,[l.kI.required]]}),this.toggleDCalc(),this.form.get("k").valueChanges.subscribe(()=>this.updateValidityOnChange(["m","l","d"])),this.form.get("m").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","l","c","d"])),this.form.get("l").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","m"])),this.form.get("plugin").valueChanges.subscribe(_=>this.onPluginChange(_)),this.form.get("scalar_mds").valueChanges.subscribe(()=>this.setClayDefaultsForScalar())}baseValueValidation(_=!1){return this.validValidation(()=>this.getKMSum()>this.deviceCount&&this.form.getValue("k")>this.form.getValue("m")===_)}validValidation(_,o){return!((!this.form||o)&&this.plugin!==o)&&_()}getKMSum(){return this.form.getValue("k")+this.form.getValue("m")}lrcDataValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m"),i=this.form.getValue("l"),s=_+o;return this.lrcMultiK=_/(s/i),_%(s/i)!=0},"lrc")}shecDataValidation(_){return this.validValidation(()=>this.form.getValue("m")>_,"shec")}lrcLocalityValidation(_){return this.validValidation(()=>{const o=this.getKMSum();return this.lrcGroups=_>0?o/_:0,_>0&&o%_!=0},"lrc")}shecDurabilityValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m");return _>o},"shec")}dMinValidation(_){return this.validValidation(()=>this.getDMin()>_,"clay")}getDMin(){return this.form.getValue("k")+1}dMaxValidation(_){return this.validValidation(()=>_>this.getDMax(),"clay")}getDMax(){const _=this.form.getValue("m");return 
this.form.getValue("k")+_-1}toggleDCalc(){this.dCalc=!this.dCalc,this.form.get("d")[this.dCalc?"disable":"enable"](),this.calculateD()}calculateD(){this.plugin!==this.PLUGIN.CLAY||!this.dCalc||this.form.silentSet("d",this.getDMax())}updateValidityOnChange(_){_.forEach(o=>{"d"===o&&this.calculateD(),this.form.get(o).updateValueAndValidity({emitEvent:!1})})}onPluginChange(_){this.plugin=_,_===this.PLUGIN.JERASURE?this.setJerasureDefaults():_===this.PLUGIN.LRC?this.setLrcDefaults():_===this.PLUGIN.ISA?this.setIsaDefaults():_===this.PLUGIN.SHEC?this.setShecDefaults():_===this.PLUGIN.CLAY&&this.setClayDefaults(),this.updateValidityOnChange(["m"])}setJerasureDefaults(){this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liberation","blaum_roth","liber8tion"],this.setDefaults({k:4,m:2,technique:"reed_sol_van"})}setLrcDefaults(){this.setDefaults({k:4,m:2,l:3})}setIsaDefaults(){this.techniques=["reed_sol_van","cauchy"],this.setDefaults({k:7,m:3,technique:"reed_sol_van"})}setShecDefaults(){this.setDefaults({k:4,m:3,c:2})}setClayDefaults(){this.setDefaults({k:4,m:2,scalar_mds:this.PLUGIN.JERASURE}),this.setClayDefaultsForScalar()}setClayDefaultsForScalar(){const _=this.form.getValue("scalar_mds");let o="reed_sol_van";_===this.PLUGIN.JERASURE?this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liber8tion"]:_===this.PLUGIN.ISA?this.techniques=["reed_sol_van","cauchy"]:(o="single",this.techniques=["single","multiple"]),this.setDefaults({technique:o})}setDefaults(_){Object.keys(_).forEach(o=>{const i=this.form.get(o),s=i.value;i.pristine||"technique"===o&&!this.techniques.includes(s)||"k"===o&&[4,7].includes(s)||"m"===o&&[2,3].includes(s)?i.setValue(_[o]):i.updateValueAndValidity()})}ngOnInit(){this.ecpService.getInfo().subscribe(({plugins:_,names:o,directory:i,nodes:s})=>{this.initCrushNodeSelection(s,this.form.get("crushRoot"),this.form.get("crushFailureDomain"),this.form.get("crushDeviceClass")),this.plugins=_,this.names=o,this.form.silentSet("directory",i),this.preValidateNumericInputFields()})}preValidateNumericInputFields(){const _=["k","m","l","c","d"].map(o=>this.form.get(o));_.forEach(o=>{o.markAsTouched(),o.markAsDirty()}),_[1].updateValueAndValidity()}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=this.createJson();this.taskWrapper.wrapTaskAroundCall({task:new v.R("ecp/create",{name:_.name}),call:this.ecpService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}createJson(){const _={technique:[this.PLUGIN.ISA,this.PLUGIN.JERASURE,this.PLUGIN.CLAY],packetSize:[this.PLUGIN.JERASURE],l:[this.PLUGIN.LRC],crushLocality:[this.PLUGIN.LRC],c:[this.PLUGIN.SHEC],d:[this.PLUGIN.CLAY],scalar_mds:[this.PLUGIN.CLAY]},o=new Z_,i=this.form.getValue("plugin");return Object.keys(this.form.controls).filter(s=>{const c=_[s],d=this.form.getValue(s);return(c&&c.includes(i)||!c)&&d&&""!==d}).forEach(s=>{this.extendJson(s,o)}),o}extendJson(_,o){const s=this.form.getValue(_);o[{crushFailureDomain:"crush-failure-domain",crushRoot:"crush-root",crushDeviceClass:"crush-device-class",packetSize:"packetsize",crushLocality:"crush-locality"}[_]||_]="crushRoot"===_?s.name:s}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(be.O),e.Y36(f.Kz),e.Y36(de.P),e.Y36(Me),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-erasure-code-profile-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:98,vars:53,consts:function(){let n,_,o,i,s,c,d,P,p,R,h,S,m,u,A,$,I,D,x,y,Z,U,G,H,z,q,X,Q,w,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ne,ie,se,ae,le,re,ce;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Plugin",i="Data chunks (k)",s="Coding chunks (m)",c="Crush failure domain",d="Crush root",P="Crush device class",p="Let Ceph decide",R="Available OSDs: " + "\ufffd0\ufffd" + "",h="Directory",S="This field is required!",m="The name can only consist of alphanumeric characters, dashes and underscores.",u="The chosen erasure code profile name is already in use.",A="Loading...",$="This field is required!",I="This field is required!",D="Must be equal to or greater than 2.",x="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",y="For an equal distribution k has to be a multiple of (k+m)/l.",Z="K has to be equal to or greater than m in order to recover data correctly through c.",U="Distribution factor: " + "\ufffd0\ufffd" + "",G="This field is required!",H="Must be equal to or greater than 1.",z="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",q="Durability estimator (c)",X="Must be equal to or greater than 1.",Q="C has to be equal to or lower than m as m defines the amount of chunks that can be used.",w="Helper chunks (d)",J="Set d manually or use the plugin's default calculation that maximizes d.",k="D is automatically updated on k and m changes",V="D can be set from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + "",Y="D can only be set to " + "\ufffd0\ufffd" + "",B="D has to be greater than k (" + "\ufffd0\ufffd" + ").",j="D has to be lower than k + m (" + "\ufffd0\ufffd" + ").",K="Locality (l)",N="Locality groups: " + "\ufffd0\ufffd" + "",W="This field is required!",ee="Must be equal to or greater than 1.",_e="Can't split up chunks (k+m) correctly with the current locality.",oe="Loading...",te="Crush Locality",ne="Loading...",ie="None",se="Scalar mds",ae="Technique",le="Packetsize",re="Must be equal to or greater than 1.",ce="Loading...",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","plugin",1,"cd-col-form-label"],[1,"required"],o,[3,"html"],["id","plugin","name","plugin","formControlName","plugin",1,"form-select"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","k",1,"cd-col-form-label"],i,["type","number","id","k","name","k","ng-model","$ctrl.erasureCodeProfile.k","placeholder","Data chunks...","formControlName","k","min","2",1,"form-control"],["class","form-text text-muted",4,"ngIf"],["for","m",1,"cd-col-form-label"],s,["type","number","id","m","name","m","placeholder","Coding chunks...","formControlName","m","min","1",1,"form-control"],["class","form-group 
row",4,"ngIf"],["for","crushFailureDomain",1,"cd-col-form-label"],c,["id","crushFailureDomain","name","crushFailureDomain","formControlName","crushFailureDomain",1,"form-select"],["for","crushRoot",1,"cd-col-form-label"],d,["id","crushRoot","name","crushRoot","formControlName","crushRoot",1,"form-select"],["for","crushDeviceClass",1,"cd-col-form-label"],P,["id","crushDeviceClass","name","crushDeviceClass","formControlName","crushDeviceClass",1,"form-select"],["ngValue",""],p,[1,"form-text","text-muted"],R,["for","directory",1,"cd-col-form-label"],h,["type","text","id","directory","name","directory","placeholder","Path...","formControlName","directory",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],S,m,u,A,[3,"ngValue"],$,I,D,x,y,Z,U,G,H,z,["for","c",1,"cd-col-form-label"],q,["type","number","id","c","name","c","placeholder","Coding chunks...","formControlName","c","min","1",1,"form-control"],X,Q,["for","d",1,"cd-col-form-label"],w,[1,"input-group"],["type","number","id","d","name","d","placeholder","Helper chunks...","formControlName","d",1,"form-control"],["id","d-calc-btn","ngbTooltip",J,"type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],[4,"ngIf"],k,V,Y,B,j,["for","l",1,"cd-col-form-label"],K,["type","number","id","l","name","l","placeholder","Coding chunks...","formControlName","l","min","1",1,"form-control"],N,W,ee,_e,oe,["for","crushLocality",1,"cd-col-form-label"],te,["id","crushLocality","name","crushLocality","formControlName","crushLocality",1,"form-select"],ne,ie,["for","scalar_mds",1,"cd-col-form-label"],se,["id","scalar_mds","name","scalar_mds","formControlName","scalar_mds",1,"form-select"],["for","technique",1,"cd-col-form-label"],ae,["id","technique","name","technique","formControlName","technique",1,"form-select"],["for","packetSize",1,"cd-col-form-label"],le,["type","number","id","packetSize","name","packetSize","placeholder","Packetsize...","formControlName","packetSize","min","1",1,"form-control"],re,ce]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,G_,2,0,"span",12),e.YNc(15,H_,2,0,"span",12),e.YNc(16,z_,2,0,"span",12),e.qZA()(),e.TgZ(17,"div",7)(18,"label",13)(19,"span",14),e.SDv(20,15),e.qZA(),e._UZ(21,"cd-helper",16),e.qZA(),e.TgZ(22,"div",10)(23,"select",17),e.YNc(24,q_,2,0,"option",18),e.YNc(25,X_,2,2,"option",19),e.qZA(),e.YNc(26,Q_,2,0,"span",12),e.qZA()(),e.TgZ(27,"div",7)(28,"label",20)(29,"span",14),e.SDv(30,21),e.qZA(),e._UZ(31,"cd-helper",16),e.qZA(),e.TgZ(32,"div",10),e._UZ(33,"input",22),e.YNc(34,w_,2,0,"span",12),e.YNc(35,J_,2,0,"span",12),e.YNc(36,k_,2,1,"span",12),e.YNc(37,V_,2,0,"span",12),e.YNc(38,Y_,2,0,"span",12),e.YNc(39,B_,2,1,"span",23),e.qZA()(),e.TgZ(40,"div",7)(41,"label",24)(42,"span",14),e.SDv(43,25),e.qZA(),e._UZ(44,"cd-helper",16),e.qZA(),e.TgZ(45,"div",10),e._UZ(46,"input",26),e.YNc(47,j_,2,0,"span",12),e.YNc(48,K_,2,0,"span",12),e.YNc(49,W_,2,1,"span",12),e.qZA()(),e.YNc(50,oo,9,3,"div",27),e.YNc(51,ro,14,6,"div",27),e.YNc(52,Po,12,5,"div",27),e.TgZ(53,"div",7)(54,"label",28),e.ynx(55),e.SDv(56,29),e.BQk(),e._UZ(57,"cd-helper",16),e.qZA(),e.TgZ(58,"div",10)(59,"select",30),e.YNc(60,fo,2,0,"option",18),e.YNc(61,Eo,2,3,"option",19),e.qZA()()(),e.YNc(62,mo,10,4,"div",27),e.YNc(63,Mo,8,6,"div",27),e.YNc(64,So,8,2,"div",27),e.YNc(65,Lo,8,2,"d
iv",27),e.TgZ(66,"div",7)(67,"label",31),e.ynx(68),e.SDv(69,32),e.BQk(),e._UZ(70,"cd-helper",16),e.qZA(),e.TgZ(71,"div",10)(72,"select",33),e.YNc(73,Ao,2,0,"option",18),e.YNc(74,Fo,2,2,"option",19),e.qZA()()(),e.TgZ(75,"div",7)(76,"label",34),e.ynx(77),e.SDv(78,35),e.BQk(),e._UZ(79,"cd-helper",16),e.qZA(),e.TgZ(80,"div",10)(81,"select",36)(82,"option",37),e.SDv(83,38),e.qZA(),e.YNc(84,No,2,2,"option",19),e.qZA(),e.TgZ(85,"span",39),e.SDv(86,40),e.qZA()()(),e.TgZ(87,"div",7)(88,"label",41),e.ynx(89),e.SDv(90,42),e.BQk(),e._UZ(91,"cd-helper",16),e.qZA(),e.TgZ(92,"div",10),e._UZ(93,"input",43),e.qZA()()(),e.TgZ(94,"div",44)(95,"cd-form-button-panel",45),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(96,"titlecase"),e.ALo(97,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,41,o.action))(e.lcZ(4,43,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(8),e.Q6J("ngIf",o.form.showError("name",i,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",i,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",i,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.plugins[o.plugin].description),e.xp6(3),e.Q6J("ngIf",!o.plugins),e.xp6(1),e.Q6J("ngForOf",o.plugins),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",i,"required")),e.xp6(5),e.Q6J("html",o.tooltips.k),e.xp6(3),e.Q6J("ngIf",o.form.showError("k",i,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",i,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",i,"max")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",i,"unequal")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",i,"kLowerM")),e.xp6(1),e.Q6J("ngIf","lrc"===o.plugin),e.xp6(5),e.Q6J("html",o.tooltips.m),e.xp6(3),e.Q6J("ngIf",o.form.showError("m",i,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",i,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",i,"max")),e.xp6(1),e.Q6J("ngIf","shec"===o.plugin),e.xp6(1),e.Q6J("ngIf","clay"===o.plugin),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(5),e.Q6J("html",o.tooltips.crushFailureDomain),e.xp6(3),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(1),e.Q6J("ngIf",o.PLUGIN.CLAY===o.plugin),e.xp6(1),e.Q6J("ngIf",e.kEZ(49,Ie,o.PLUGIN.JERASURE,o.PLUGIN.ISA,o.PLUGIN.CLAY).includes(o.plugin)),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.JERASURE),e.xp6(5),e.Q6J("html",o.tooltips.crushRoot),e.xp6(3),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(5),e.Q6J("html",o.tooltips.crushDeviceClass),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.pQV(o.deviceCount),e.QtT(86),e.xp6(5),e.Q6J("html",o.tooltips.directory),e.xp6(4),e.Q6J("form",o.form)("submitText",e.lcZ(96,45,o.action)+" "+e.lcZ(97,47,o.resource))}},dependencies:[C.mk,C.sg,C.O5,Pe.S,$e.z,fe.p,Ee.U,ge.o,pe.b,Re.P,me.V,l._Y,l.YN,l.Kr,l.Fj,l.wV,l.EJ,l.JJ,l.JL,l.qQ,l.sg,l.u,f._L,C.rS,Ce.m]}),t})();var vo=r(7022);class $o{constructor(){this.erasureInfo=!1,this.crushInfo=!1,this.pgs=1,this.poolTypes=["erasure","replicated"],this.applications={selected:[],default:["cephfs","rbd","rgw"],available:[],validators:[l.kI.pattern("[A-Za-z0-9_]+"),l.kI.maxLength(128)],messages:new vo.a({empty:"No applications added",selectionLimit:{text:"Applications limit reached",tooltip:"A pool can only have up to four applications definitions."},customValidations:{pattern:"Allowed characters '_a-zA-Z0-9'",maxlength:"Maximum length is 128 characters"},filter:"Filter or add applications'",add:"Add application"})}}}var 
De=r(63285),xe=r(47640),Io=r(60192),Do=r(30490),ye=r(61350),xo=r(17932),yo=r(63622),Zo=r(60950);const Uo=["crushInfoTabs"],Go=["crushDeletionBtn"],Ho=["ecpInfoTabs"],zo=["ecpDeletionBtn"];function qo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,42),e.qZA())}function Xo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,43),e.qZA())}function Qo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,44),e.qZA())}function wo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,45),e.qZA())}function Jo(t,n){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function ko(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,47),e.qZA())}function Vo(t,n){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Yo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,58),e.qZA())}function Bo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,59),e.qZA())}function jo(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,60),e.qZA())}function Ko(t,n){1&t&&(e.TgZ(0,"span",55),e.SDv(1,61),e.qZA())}function Wo(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",8)(1,"label",52),e.SDv(2,53),e.qZA(),e.TgZ(3,"div",11)(4,"input",54),e.NdJ("focus",function(){e.CHM(_);const i=e.oxw(3);return e.KtG(i.externalPgChange=!1)})("blur",function(){e.CHM(_);const i=e.oxw(3);return e.KtG(i.alignPgs())}),e.qZA(),e.YNc(5,Yo,2,0,"span",13),e.YNc(6,Bo,2,0,"span",13),e.YNc(7,jo,2,0,"span",13),e.TgZ(8,"span",55),e._UZ(9,"cd-doc",56),e.qZA(),e.YNc(10,Ko,2,0,"span",57),e.qZA()()}if(2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.form.showError("pgNum",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"34")),e.xp6(3),e.Q6J("ngIf",o.externalPgChange)}}function et(t,n){if(1&t&&(e.TgZ(0,"span",41)(1,"ul",66)(2,"li"),e.SDv(3,67),e.qZA(),e.TgZ(4,"li"),e.SDv(5,68),e.qZA()()()),2&t){const _=e.oxw(4);e.xp6(3),e.pQV(_.getMinSize()),e.QtT(3),e.xp6(2),e.pQV(_.getMaxSize()),e.QtT(5)}}function _t(t,n){if(1&t&&(e.TgZ(0,"span",41),e.SDv(1,69),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.pQV(_.getMinSize())(_.getMaxSize()),e.QtT(1)}}function ot(t,n){1&t&&(e.TgZ(0,"span",70),e.SDv(1,71),e.qZA())}function tt(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",62),e.SDv(2,63),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",64),e.YNc(5,et,6,2,"span",13),e.YNc(6,_t,2,2,"span",13),e.YNc(7,ot,2,0,"span",65),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(4),e.Q6J("max",o.getMaxSize())("min",o.getMinSize()),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",1===o.form.getValue("size"))}}function nt(t,n){1&t&&(e.TgZ(0,"div",8)(1,"label",72),e.SDv(2,73),e.qZA(),e.TgZ(3,"div",11)(4,"div",74),e._UZ(5,"input",75),e.TgZ(6,"label",76),e.SDv(7,77),e.qZA()()()())}function it(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",8)(2,"label",48),e.SDv(3,49),e.qZA(),e.TgZ(4,"div",11)(5,"select",50),e.YNc(6,Vo,2,2,"option",19),e.qZA()()(),e.YNc(7,Wo,11,4,"div",51),e.YNc(8,tt,8,5,"div",51),e.YNc(9,nt,8,0,"div",51),e.qZA()),2&t){const _=e.oxw(2);e.xp6(6),e.Q6J("ngForOf",_.pgAutoscaleModes),e.xp6(1),e.Q6J("ngIf","on"!==_.form.getValue("pgAutoscaleMode")),e.xp6(1),e.Q6J("ngIf",_.isReplicated),e.xp6(1),e.Q6J("ngIf",_.info.is_all_bluestore&&_.isErasure)}}function st(t,n){if(1&t&&e._UZ(0,"i",78),2&t){const _=e.oxw(2);e.Gre("",_.icons.warning," icon-warning-color")}}function at(t,n){1&t&&(e.TgZ(0,"option",17),e.SDv(1,92),e.qZA())}function 
lt(t,n){1&t&&(e.TgZ(0,"option",93),e.SDv(1,94),e.qZA()),2&t&&e.Q6J("ngValue",null)}function rt(t,n){1&t&&(e.TgZ(0,"option",93),e.SDv(1,95),e.qZA()),2&t&&e.Q6J("ngValue",null)}function ct(t,n){if(1&t&&(e.TgZ(0,"option",93),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}const F=function(t){return[t]};function Ot(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"button",96),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(4);return e.KtG(i.addErasureCodeProfile())}),e._UZ(1,"i",88),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function dt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"button",97,98),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(4);return e.KtG(i.deleteErasureCodeProfile())}),e._UZ(2,"i",88),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ut=function(){return["name"]};function Pt(t,n){if(1&t&&e._UZ(0,"cd-table-key-value",109),2&t){const _=e.oxw(5);e.Q6J("renderObjects",!0)("hideKeys",e.DdM(4,ut))("data",_.form.getValue("erasureProfile"))("autoReload",!1)}}function ft(t,n){1&t&&(e.TgZ(0,"span"),e.SDv(1,112),e.qZA())}function Et(t,n){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.hij(" ",_," ")}}function gt(t,n){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,Et,2,1,"li",113),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.ecpUsage)}}function pt(t,n){if(1&t&&(e.YNc(0,ft,2,0,"ng-template",null,110,e.W1O),e.YNc(2,gt,2,1,"ul",111)),2&t){const _=e.MAs(1),o=e.oxw(5);e.xp6(2),e.Q6J("ngIf",o.ecpUsage)("ngIfElse",_)}}function Rt(t,n){if(1&t&&(e.TgZ(0,"span",99)(1,"nav",100,101),e.ynx(3,102),e.TgZ(4,"a",103),e.SDv(5,104),e.qZA(),e.YNc(6,Pt,1,5,"ng-template",105),e.BQk(),e.ynx(7,106),e.TgZ(8,"a",103),e.SDv(9,107),e.qZA(),e.YNc(10,pt,3,2,"ng-template",105),e.BQk(),e.qZA(),e._UZ(11,"div",108),e.qZA()),2&t){const _=e.MAs(2);e.xp6(11),e.Q6J("ngbNavOutlet",_)}}const Ze=function(t){return{active:t}};function mt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",8)(1,"label",80),e.SDv(2,81),e.qZA(),e.TgZ(3,"div",11)(4,"div",82)(5,"select",83),e.YNc(6,at,2,0,"option",84),e.YNc(7,lt,2,1,"option",85),e.YNc(8,rt,2,1,"option",85),e.YNc(9,ct,2,2,"option",86),e.qZA(),e.TgZ(10,"button",87),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(3);return e.KtG(i.data.erasureInfo=!i.data.erasureInfo)}),e._UZ(11,"i",88),e.qZA(),e.YNc(12,Ot,2,3,"button",89),e.YNc(13,dt,3,3,"button",90),e.qZA(),e.YNc(14,Rt,12,1,"span",91),e.qZA()()}if(2&t){const _=e.oxw(3);e.xp6(6),e.Q6J("ngIf",!_.ecProfiles),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&0===_.ecProfiles.length),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&_.ecProfiles.length>0),e.xp6(1),e.Q6J("ngForOf",_.ecProfiles),e.xp6(1),e.Q6J("ngClass",e.VKq(9,Ze,_.data.erasureInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,_.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",_.data.erasureInfo&&_.form.getValue("erasureProfile"))}}function Ct(t,n){1&t&&(e.TgZ(0,"div",8)(1,"label",114),e.SDv(2,115),e.qZA(),e.TgZ(3,"div",11)(4,"span",55),e.SDv(5,116),e.qZA()()())}function Mt(t,n){1&t&&(e.TgZ(0,"span",55)(1,"span"),e.SDv(2,119),e.qZA(),e._uU(3,"\xa0 "),e.qZA())}function ht(t,n){if(1&t&&(e.TgZ(0,"option",93),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.rule_name," ")}}function St(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"button",96),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(5);return e.KtG(i.addCrushRule())}),e._UZ(1,"i",88),e.qZA()}if(2&t){const 
_=e.oxw(5);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function Tt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"button",126,127),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(5);return e.KtG(i.deleteCrushRule())}),e._UZ(2,"i",88),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const Lt=function(){return["steps","type","rule_name"]};function At(t,n){if(1&t&&e._UZ(0,"cd-table-key-value",109),2&t){const _=e.oxw(6);e.Q6J("renderObjects",!1)("hideKeys",e.DdM(4,Lt))("data",_.form.getValue("crushRule"))("autoReload",!1)}}function Ft(t,n){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(7);e.xp6(1),e.hij(" ",o.describeCrushStep(_)," ")}}function Nt(t,n){if(1&t&&(e.TgZ(0,"ol"),e.YNc(1,Ft,2,1,"li",113),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.form.get("crushRule").value.steps)}}function bt(t,n){1&t&&(e.TgZ(0,"span"),e.SDv(1,136),e.qZA())}function vt(t,n){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.hij(" ",_," ")}}function $t(t,n){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,vt,2,1,"li",113),e.qZA()),2&t){const _=e.oxw(7);e.xp6(1),e.Q6J("ngForOf",_.crushUsage)}}function It(t,n){if(1&t&&(e.YNc(0,bt,2,0,"ng-template",null,135,e.W1O),e.YNc(2,$t,2,1,"ul",111)),2&t){const _=e.MAs(1),o=e.oxw(6);e.xp6(2),e.Q6J("ngIf",o.crushUsage)("ngIfElse",_)}}function Dt(t,n){if(1&t&&(e.TgZ(0,"div",128)(1,"nav",100,129),e.ynx(3,130),e.TgZ(4,"a",103),e.SDv(5,131),e.qZA(),e.YNc(6,At,1,5,"ng-template",105),e.BQk(),e.ynx(7,132),e.TgZ(8,"a",103),e.SDv(9,133),e.qZA(),e.YNc(10,Nt,2,1,"ng-template",105),e.BQk(),e.ynx(11,106),e.TgZ(12,"a",103),e.SDv(13,134),e.qZA(),e.YNc(14,It,3,2,"ng-template",105),e.BQk(),e.qZA(),e._UZ(15,"div",108),e.qZA()),2&t){const _=e.MAs(2);e.xp6(15),e.Q6J("ngbNavOutlet",_)}}function xt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,137),e.qZA())}function yt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,138),e.qZA())}function Zt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",120)(2,"select",121)(3,"option",93),e.SDv(4,122),e.qZA(),e.YNc(5,ht,2,2,"option",86),e.qZA(),e.TgZ(6,"button",123),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(4);return e.KtG(i.data.crushInfo=!i.data.crushInfo)}),e._UZ(7,"i",88),e.qZA(),e.YNc(8,St,2,3,"button",89),e.YNc(9,Tt,3,3,"button",124),e.qZA(),e.YNc(10,Dt,16,1,"div",125),e.YNc(11,xt,2,0,"span",13),e.YNc(12,yt,2,0,"span",13),e.qZA()}if(2&t){e.oxw(3);const _=e.MAs(2),o=e.oxw();e.xp6(3),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.current.rules),e.xp6(1),e.Q6J("ngClass",e.VKq(9,Ze,o.data.crushInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,o.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.data.crushInfo&&o.form.getValue("crushRule")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"tooFewOsds"))}}function Ut(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",114),e.SDv(2,117),e.qZA(),e.TgZ(3,"div",11),e.YNc(4,Mt,4,0,"ng-template",null,118,e.W1O),e.YNc(6,Zt,13,13,"div",111),e.qZA()()),2&t){const _=e.MAs(5),o=e.oxw(3);e.xp6(6),e.Q6J("ngIf",o.current.rules.length>0)("ngIfElse",_)}}function Gt(t,n){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,79),e.qZA(),e.YNc(3,mt,15,13,"div",51),e.YNc(4,Ct,6,0,"div",51),e.YNc(5,Ut,7,2,"div",51),e.qZA()),2&t){const _=e.oxw(2);e.xp6(3),e.Q6J("ngIf",_.isErasure),e.xp6(1),e.Q6J("ngIf",_.isErasure&&!_.editing),e.xp6(1),e.Q6J("ngIf",_.isReplicated||_.editing)}}function 
Ht(t,n){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function zt(t,n){1&t&&(e.TgZ(0,"option",17),e.SDv(1,156),e.qZA())}function qt(t,n){1&t&&(e.TgZ(0,"option",17),e.SDv(1,157),e.qZA())}function Xt(t,n){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Qt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,158),e.qZA())}function wt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,159),e.qZA())}function Jt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,160),e.qZA())}function kt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,161),e.qZA())}function Vt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,162),e.qZA())}function Yt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,163),e.qZA())}function Bt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,164),e.qZA())}function jt(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",8)(2,"label",144),e.SDv(3,145),e.qZA(),e.TgZ(4,"div",11)(5,"select",146),e.YNc(6,zt,2,0,"option",84),e.YNc(7,qt,2,0,"option",84),e.YNc(8,Xt,2,2,"option",19),e.qZA()()(),e.TgZ(9,"div",8)(10,"label",147),e.SDv(11,148),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",149),e.YNc(14,Qt,2,0,"span",13),e.YNc(15,wt,2,0,"span",13),e.YNc(16,Jt,2,0,"span",13),e.qZA()(),e.TgZ(17,"div",8)(18,"label",150),e.SDv(19,151),e.qZA(),e.TgZ(20,"div",11),e._UZ(21,"input",152),e.YNc(22,kt,2,0,"span",13),e.YNc(23,Vt,2,0,"span",13),e.YNc(24,Yt,2,0,"span",13),e.qZA()(),e.TgZ(25,"div",8)(26,"label",153),e.SDv(27,154),e.qZA(),e.TgZ(28,"div",11),e._UZ(29,"input",155),e.YNc(30,Bt,2,0,"span",13),e.qZA()()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(6),e.Q6J("ngIf",!o.info.compression_algorithms),e.xp6(1),e.Q6J("ngIf",o.info.compression_algorithms&&0===o.info.compression_algorithms.length),e.xp6(1),e.Q6J("ngForOf",o.info.compression_algorithms),e.xp6(6),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"maximum")),e.xp6(1),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"pattern")),e.xp6(6),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"minimum")),e.xp6(1),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"pattern")),e.xp6(6),e.Q6J("ngIf",o.form.showError("ratio",_,"min")||o.form.showError("ratio",_,"max"))}}function Kt(t,n){if(1&t&&(e.TgZ(0,"div",139)(1,"legend"),e.SDv(2,140),e.qZA(),e.TgZ(3,"div",8)(4,"label",141),e.SDv(5,142),e.qZA(),e.TgZ(6,"div",11)(7,"select",143),e.YNc(8,Ht,2,2,"option",19),e.qZA()()(),e.YNc(9,jt,31,10,"div",20),e.qZA()),2&t){const _=e.oxw(2);e.xp6(8),e.Q6J("ngForOf",_.info.compression_modes),e.xp6(1),e.Q6J("ngIf",_.hasCompressionEnabled())}}function Wt(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,165),e.qZA())}function en(t,n){1&t&&(e.TgZ(0,"span",41),e.SDv(1,166),e.qZA())}function _n(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7)(9,"div",8)(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,qo,2,0,"span",13),e.YNc(15,Xo,2,0,"span",13),e.YNc(16,Qo,2,0,"span",13),e.YNc(17,wo,2,0,"span",13),e.qZA()(),e.TgZ(18,"div",8)(19,"label",14),e.SDv(20,15),e.qZA(),e.TgZ(21,"div",11)(22,"select",16)(23,"option",17),e.SDv(24,18),e.qZA(),e.YNc(25,Jo,2,2,"option",19),e.qZA(),e.YNc(26,ko,2,0,"span",13),e.qZA()(),e.YNc(27,it,10,4,"div",20),e.TgZ(28,"div",8)(29,"label",21),e.SDv(30,22),e.qZA(),e.TgZ(31,"div",11)(32,"cd-select-badges",23),e.NdJ("selection",function(){e.CHM(_);const 
i=e.oxw();return e.KtG(i.appSelection())}),e.qZA(),e.YNc(33,st,1,3,"i",24),e.qZA()(),e.YNc(34,Gt,6,3,"div",20),e.YNc(35,Kt,10,2,"div",25),e.TgZ(36,"div")(37,"legend"),e.SDv(38,26),e.qZA(),e.TgZ(39,"div",8)(40,"label",27),e.ynx(41),e.SDv(42,28),e.BQk(),e.TgZ(43,"cd-helper")(44,"span"),e.SDv(45,29),e.qZA(),e._UZ(46,"br"),e.TgZ(47,"span"),e.SDv(48,30),e.qZA()()(),e.TgZ(49,"div",11),e._UZ(50,"input",31),e.YNc(51,Wt,2,0,"span",13),e.qZA()(),e.TgZ(52,"div",8)(53,"label",32),e.ynx(54),e.SDv(55,33),e.BQk(),e.TgZ(56,"cd-helper")(57,"span"),e.SDv(58,34),e.qZA(),e._UZ(59,"br"),e.TgZ(60,"span"),e.SDv(61,35),e.qZA()()(),e.TgZ(62,"div",11),e._UZ(63,"input",36),e.YNc(64,en,2,0,"span",13),e.qZA()()(),e.TgZ(65,"div",37)(66,"cd-rbd-configuration-form",38),e.NdJ("changes",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.currentConfigurationValues=i())}),e.qZA()()(),e.TgZ(67,"div",39)(68,"cd-form-button-panel",40),e.NdJ("submitActionEvent",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.submit())}),e.ALo(69,"titlecase"),e.ALo(70,"upperFirst"),e.qZA()()()()()}if(2&t){const _=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.form),e.xp6(6),e.pQV(e.lcZ(6,26,o.action))(e.lcZ(7,28,o.resource)),e.QtT(5),e.xp6(7),e.Q6J("ngIf",o.form.showError("name",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"uniqueName")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"rbdPool")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"pattern")),e.xp6(8),e.Q6J("ngForOf",o.data.poolTypes),e.xp6(1),e.Q6J("ngIf",o.form.showError("poolType",_,"required")),e.xp6(1),e.Q6J("ngIf",o.isReplicated||o.isErasure),e.xp6(5),e.Q6J("customBadges",!0)("customBadgeValidators",o.data.applications.validators)("messages",o.data.applications.messages)("data",o.data.applications.selected)("options",o.data.applications.available)("selectionLimit",4),e.xp6(1),e.Q6J("ngIf",o.data.applications.selected<=0),e.xp6(1),e.Q6J("ngIf",o.isErasure||o.isReplicated),e.xp6(1),e.Q6J("ngIf",o.info.is_all_bluestore),e.xp6(16),e.Q6J("ngIf",o.form.showError("max_bytes",_,"pattern")),e.xp6(13),e.Q6J("ngIf",o.form.showError("max_objects",_,"min")),e.xp6(1),e.Q6J("hidden",o.isErasure||-1===o.data.applications.selected.indexOf("rbd")),e.xp6(1),e.Q6J("form",o.form)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(69,30,o.action)+" "+e.lcZ(70,32,o.resource))}}let Ue=(()=>{class t extends M_.E{constructor(_,o,i,s,c,d,P,p,R,h,S){super(),this.dimlessBinaryPipe=_,this.route=o,this.router=i,this.modalService=s,this.poolService=c,this.authStorageService=d,this.formatter=P,this.taskWrapper=p,this.ecpService=R,this.crushRuleService=h,this.actionLabels=S,this.editing=!1,this.isReplicated=!1,this.isErasure=!1,this.data=new $o,this.externalPgChange=!1,this.current={rules:[]},this.initializeConfigData=new R_.t(1),this.currentConfigurationValues={},this.icons=b.P,this.crushUsage=void 0,this.ecpUsage=void 0,this.crushRuleMaxSize=10,this.editing=this.router.url.startsWith(`/pool/${M.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="pool",this.authenticate(),this.createForm()}authenticate(){if(this.permission=this.authStorageService.getPermissions().pool,!this.permission.read||!this.permission.update&&this.editing||!this.permission.create&&!this.editing)throw new m_._2}createForm(){const _=new Ne.d({mode:new l.p4("none"),algorithm:new l.p4(""),minBlobSize:new l.p4("",{updateOn:"blur"}),maxBlobSize:new l.p4("",{updateOn:"blur"}),ratio:new l.p4("",{updateOn:"blur"})});this.form=new 
Ne.d({name:new l.p4("",{validators:[l.kI.pattern(/^[.A-Za-z0-9_/-]+$/),l.kI.required,E.h.custom("rbdPool",()=>this.form&&this.form.getValue("name").includes("/")&&this.data&&-1!==this.data.applications.selected.indexOf("rbd"))]}),poolType:new l.p4("",{validators:[l.kI.required]}),crushRule:new l.p4(null,{validators:[E.h.custom("tooFewOsds",o=>this.info&&o&&this.info.osd_count<1),E.h.custom("required",o=>this.isReplicated&&this.info.crush_rules_replicated.length>0&&!o)]}),size:new l.p4("",{updateOn:"blur"}),erasureProfile:new l.p4(null),pgNum:new l.p4("",{validators:[l.kI.required]}),pgAutoscaleMode:new l.p4(null),ecOverwrites:new l.p4(!1),compression:_,max_bytes:new l.p4(""),max_objects:new l.p4(0)},[E.h.custom("form",()=>null)])}ngOnInit(){this.poolService.getInfo().subscribe(_=>{this.initInfo(_),this.editing?this.initEditMode():(this.setAvailableApps(),this.loadingReady()),this.listenToChanges(),this.setComplexValidators()})}initInfo(_){this.pgAutoscaleModes=_.pg_autoscale_modes,this.form.silentSet("pgAutoscaleMode",_.pg_autoscale_default_mode),this.form.silentSet("algorithm",_.bluestore_compression_algorithm),this.info=_,this.initEcp(_.erasure_code_profiles)}initEcp(_){this.setListControlStatus("erasureProfile",_),this.ecProfiles=_}setListControlStatus(_,o){const i=this.form.get(_),s=i.value;1!==o.length||s&&g().isEqual(s,o[0])?0===o.length&&s&&i.setValue(null):i.setValue(o[0]),o.length<=1?i.enabled&&i.disable():i.disabled&&i.enable()}initEditMode(){this.disableForEdit(),this.routeParamsSubscribe=this.route.params.subscribe(_=>this.poolService.get(_.name).subscribe(o=>{this.data.pool=o,this.initEditFormData(o),this.loadingReady()}))}disableForEdit(){["poolType","crushRule","size","erasureProfile","ecOverwrites"].forEach(_=>this.form.get(_).disable())}initEditFormData(_){this.initializeConfigData.next({initialData:_.configuration,sourceType:h_.h.pool}),this.poolTypeChange(_.type);const o=this.info.crush_rules_replicated.concat(this.info.crush_rules_erasure),i={name:_.pool_name,poolType:_.type,crushRule:o.find(s=>s.rule_name===_.crush_rule),size:_.size,erasureProfile:this.ecProfiles.find(s=>s.name===_.erasure_code_profile),pgAutoscaleMode:_.pg_autoscale_mode,pgNum:_.pg_num,ecOverwrites:_.flags_names.includes("ec_overwrites"),mode:_.options.compression_mode,algorithm:_.options.compression_algorithm,minBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_min_blob_size),maxBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_max_blob_size),ratio:_.options.compression_required_ratio,max_bytes:this.dimlessBinaryPipe.transform(_.quota_max_bytes),max_objects:_.quota_max_objects};Object.keys(i).forEach(s=>{const c=i[s];!g().isUndefined(c)&&""!==c&&this.form.silentSet(s,c)}),this.data.pgs=this.form.getValue("pgNum"),this.setAvailableApps(this.data.applications.default.concat(_.application_metadata)),this.data.applications.selected=_.application_metadata}setAvailableApps(_=this.data.applications.default){this.data.applications.available=g().uniq(_.sort()).map(o=>new C_.$(!1,o,""))}listenToChanges(){this.listenToChangesDuringAddEdit(),this.editing||this.listenToChangesDuringAdd()}listenToChangesDuringAddEdit(){this.form.get("pgNum").valueChanges.subscribe(_=>{const o=_-this.data.pgs;1===Math.abs(o)&&2!==_?this.doPgPowerJump(o):this.data.pgs=_})}doPgPowerJump(_){const o=this.calculatePgPower()+_;this.setPgs(-1===_?Math.round(o):Math.floor(o))}calculatePgPower(_=this.form.getValue("pgNum")){return Math.log(_)/Math.log(2)}setPgs(_){const 
o=Math.pow(2,_<0?0:_);this.data.pgs=o,this.form.silentSet("pgNum",o)}listenToChangesDuringAdd(){this.form.get("poolType").valueChanges.subscribe(_=>{this.poolTypeChange(_)}),this.form.get("crushRule").valueChanges.subscribe(_=>{this.crushDeletionBtn&&this.crushDeletionBtn.isOpen()&&this.crushDeletionBtn.close(),_&&(this.setCorrectMaxSize(_),this.crushRuleIsUsedBy(_.rule_name),this.replicatedRuleChange(),this.pgCalc())}),this.form.get("size").valueChanges.subscribe(()=>{this.pgCalc()}),this.form.get("erasureProfile").valueChanges.subscribe(_=>{this.ecpDeletionBtn&&this.ecpDeletionBtn.isOpen()&&this.ecpDeletionBtn.close(),_&&(this.ecpIsUsedBy(_.name),this.pgCalc())}),this.form.get("mode").valueChanges.subscribe(()=>{["minBlobSize","maxBlobSize","ratio"].forEach(_=>{this.form.get(_).updateValueAndValidity({emitEvent:!1})})}),this.form.get("minBlobSize").valueChanges.subscribe(()=>{this.form.get("maxBlobSize").updateValueAndValidity({emitEvent:!1})}),this.form.get("maxBlobSize").valueChanges.subscribe(()=>{this.form.get("minBlobSize").updateValueAndValidity({emitEvent:!1})})}poolTypeChange(_){if("replicated"===_?this.setTypeBooleans(!0,!1):this.setTypeBooleans(!1,"erasure"===_),!_||!this.info)return void(this.current.rules=[]);const o=this.info["crush_rules_"+_]||[];this.current.rules=o,!this.editing&&(this.isReplicated&&this.setListControlStatus("crushRule",o),this.replicatedRuleChange(),this.pgCalc())}setTypeBooleans(_,o){this.isReplicated=_,this.isErasure=o}replicatedRuleChange(){if(!this.isReplicated)return;const _=this.form.get("size");let o=this.form.getValue("size")||3;const i=this.getMinSize(),s=this.getMaxSize();os&&(o=s),o!==_.value&&this.form.silentSet("size",o)}getMinSize(){return!this.info||this.info.osd_count<1?0:1}getMaxSize(){const _=this.form.getValue("crushRule");return this.info?_?_.usable_size:Math.min(this.info.osd_count,3):0}pgCalc(){const _=this.form.getValue("poolType");if(!this.info||this.form.get("pgNum").dirty||!_)return;const o=100*this.info.osd_count,i=this.isReplicated?this.replicatedPgCalc(o):this.erasurePgCalc(o);if(!i)return;const s=this.data.pgs;this.alignPgs(i),this.externalPgChange||(this.externalPgChange=s!==this.data.pgs)}setCorrectMaxSize(_=this.form.getValue("crushRule")){if(!_)return;const i=T.searchFailureDomains(this.info.nodes,_.steps[0].item_name)[_.steps[1].type];_.usable_size=Math.min(i?i.length:this.crushRuleMaxSize,this.crushRuleMaxSize)}replicatedPgCalc(_){const o=this.form.get("size"),i=o.value;return o.valid&&i>0?_/i:0}erasurePgCalc(_){const 
o=this.form.get("erasureProfile"),i=o.value;return(o.valid||o.disabled)&&i?_/(i.k+i.m):0}alignPgs(_=this.form.getValue("pgNum")){this.setPgs(Math.round(this.calculatePgPower(_<1?1:_)))}setComplexValidators(){this.editing?this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.data.pool&&this.info&&-1!==this.info.pool_names.indexOf(_)&&this.info.pool_names.indexOf(_)!==this.info.pool_names.indexOf(this.data.pool.pool_name))]):(E.h.validateIf(this.form.get("size"),()=>this.isReplicated,[E.h.custom("min",_=>this.form.getValue("size")&&_this.form.getValue("size")&&this.getMaxSize()<_)]),this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.info&&-1!==this.info.pool_names.indexOf(_))])),this.setCompressionValidators()}setCompressionValidators(){E.h.validateIf(this.form.get("minBlobSize"),()=>this.hasCompressionEnabled(),[l.kI.min(0),E.h.custom("maximum",_=>this.oddBlobSize(_,this.form.getValue("maxBlobSize")))]),E.h.validateIf(this.form.get("maxBlobSize"),()=>this.hasCompressionEnabled(),[l.kI.min(0),E.h.custom("minimum",_=>this.oddBlobSize(this.form.getValue("minBlobSize"),_))]),E.h.validateIf(this.form.get("ratio"),()=>this.hasCompressionEnabled(),[l.kI.min(0),l.kI.max(1)])}oddBlobSize(_,o){const i=this.formatter.toBytes(_),s=this.formatter.toBytes(o);return Boolean(i&&s&&i>=s)}hasCompressionEnabled(){return this.form.getValue("mode")&&"none"!==this.form.get("mode").value.toLowerCase()}describeCrushStep(_){return[_.op.replace("_"," "),_.item_name||"",_.type?_.num+" type "+_.type:""].join(" ")}addErasureCodeProfile(){this.addModal(bo,_=>this.reloadECPs(_))}addModal(_,o){this.hideOpenTooltips(),this.modalService.show(_).componentInstance.submitAction.subscribe(s=>{o(s.name)})}hideOpenTooltips(){const _=o=>o&&o.isOpen()&&o.close();_(this.ecpDeletionBtn),_(this.crushDeletionBtn)}reloadECPs(_){this.reloadList({newItemName:_,getInfo:()=>this.ecpService.list(),initInfo:o=>this.initEcp(o),findNewItem:()=>this.ecProfiles.find(o=>o.name===_),controlName:"erasureProfile"})}reloadList({newItemName:_,getInfo:o,initInfo:i,findNewItem:s,controlName:c}){this.modalSubscription&&this.modalSubscription.unsubscribe(),o().subscribe(d=>{if(i(d),!_)return;const P=s();P&&this.form.get(c).setValue(P)})}deleteErasureCodeProfile(){this.deletionModal({value:this.form.getValue("erasureProfile"),usage:this.ecpUsage,deletionBtn:this.ecpDeletionBtn,dataName:"erasureInfo",getTabs:()=>this.ecpInfoTabs,tabPosition:"used-by-pools",nameAttribute:"name",itemDescription:"erasure code profile",reloadFn:()=>this.reloadECPs(),deleteFn:_=>this.ecpService.delete(_),taskName:"ecp/delete"})}deletionModal({value:_,usage:o,deletionBtn:i,dataName:s,getTabs:c,tabPosition:d,nameAttribute:P,itemDescription:p,reloadFn:R,deleteFn:h,taskName:S}){if(!_)return;if(o)return i.animation=!1,i.toggle(),this.data[s]=!0,void setTimeout(()=>{const u=c();u&&u.select(d)},50);const m=_[P];this.modalService.show(Fe.M,{itemDescription:p,itemNames:[m],submitActionObservable:()=>{const u=h(m);return u.subscribe(()=>R()),this.taskWrapper.wrapTaskAroundCall({task:new 
v.R(S,{name:m}),call:u})}})}addCrushRule(){this.addModal(y_,_=>this.reloadCrushRules(_))}reloadCrushRules(_){this.reloadList({newItemName:_,getInfo:()=>this.poolService.getInfo(),initInfo:o=>{this.initInfo(o),this.poolTypeChange("replicated")},findNewItem:()=>this.info.crush_rules_replicated.find(o=>o.rule_name===_),controlName:"crushRule"})}deleteCrushRule(){this.deletionModal({value:this.form.getValue("crushRule"),usage:this.crushUsage,deletionBtn:this.crushDeletionBtn,dataName:"crushInfo",getTabs:()=>this.crushInfoTabs,tabPosition:"used-by-pools",nameAttribute:"rule_name",itemDescription:"crush rule",reloadFn:()=>this.reloadCrushRules(),deleteFn:_=>this.crushRuleService.delete(_),taskName:"crushRule/delete"})}crushRuleIsUsedBy(_){this.crushUsage=_?this.info.used_rules[_]:void 0}ecpIsUsedBy(_){this.ecpUsage=_?this.info.used_profiles[_]:void 0}submit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _={pool:this.form.getValue("name")};this.assignFormFields(_,[{externalFieldName:"pool_type",formControlName:"poolType"},{externalFieldName:"pg_autoscale_mode",formControlName:"pgAutoscaleMode",editable:!0},{externalFieldName:"pg_num",formControlName:"pgNum",replaceFn:i=>"on"===this.form.getValue("pgAutoscaleMode")?1:i,editable:!0},this.isReplicated?{externalFieldName:"size",formControlName:"size"}:{externalFieldName:"erasure_code_profile",formControlName:"erasureProfile",attr:"name"},{externalFieldName:"rule_name",formControlName:"crushRule",replaceFn:i=>this.isReplicated?i&&i.rule_name:void 0},{externalFieldName:"quota_max_bytes",formControlName:"max_bytes",replaceFn:this.formatter.toBytes,editable:!0,resetValue:this.editing?0:void 0},{externalFieldName:"quota_max_objects",formControlName:"max_objects",editable:!0,resetValue:this.editing?0:void 0}]),this.info.is_all_bluestore&&(this.assignFormField(_,{externalFieldName:"flags",formControlName:"ecOverwrites",replaceFn:()=>this.isErasure?["ec_overwrites"]:void 0}),"none"!==this.form.getValue("mode")?this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:i=>this.hasCompressionEnabled()&&i},{externalFieldName:"compression_algorithm",formControlName:"algorithm",editable:!0},{externalFieldName:"compression_min_blob_size",formControlName:"minBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_max_blob_size",formControlName:"maxBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_required_ratio",formControlName:"ratio",editable:!0,resetValue:0}]):this.editing&&this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:()=>"unset"},{externalFieldName:"srcpool",formControlName:"name",editable:!0,replaceFn:()=>this.data.pool.pool_name}]));const o=this.data.applications.selected;(o.length>0||this.editing)&&(_.application_metadata=o),this.isReplicated&&!g().isEmpty(this.currentConfigurationValues)&&(_.configuration=this.currentConfigurationValues),this.triggerApiTask(_)}assignFormFields(_,o){o.forEach(i=>this.assignFormField(_,i))}assignFormField(_,{externalFieldName:o,formControlName:i,attr:s,replaceFn:c,editable:d,resetValue:P}){if(this.editing&&(!d||this.form.get(i).pristine))return;const p=this.form.getValue(i);let R=c?c(p):s?g().get(p,s):p;if(!p||!R){if(!d||g().isUndefined(P))return;R=P}_[o]=R}triggerApiTask(_){this.taskWrapper.wrapTaskAroundCall({task:new 
v.R("pool/"+(this.editing?M.MQ.EDIT:M.MQ.CREATE),{pool_name:_.hasOwnProperty("srcpool")?_.srcpool:_.pool}),call:this.poolService[this.editing?M.MQ.UPDATE:M.MQ.CREATE](_)}).subscribe({error:o=>{g().isObject(o.error)&&"34"===o.error.code&&this.form.get("pgNum").setErrors({34:!0}),this.form.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/pool"])})}appSelection(){this.form.get("name").updateValueAndValidity({emitEvent:!1,onlySelf:!0})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(S_.$),e.Y36(Oe.gz),e.Y36(Oe.F0),e.Y36(De.Z),e.Y36(ue.q),e.Y36(xe.j),e.Y36(T_.H),e.Y36(de.P),e.Y36(Me),e.Y36(ve.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-form"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Uo,5),e.Gf(Go,5),e.Gf(Ho,5),e.Gf(zo,5)),2&_){let i;e.iGM(i=e.CRH())&&(o.crushInfoTabs=i.first),e.iGM(i=e.CRH())&&(o.crushDeletionBtn=i.first),e.iGM(i=e.CRH())&&(o.ecpInfoTabs=i.first),e.iGM(i=e.CRH())&&(o.ecpDeletionBtn=i.first)}},features:[e.qOj],decls:1,vars:1,consts:function(){let n,_,o,i,s,c,d,P,p,R,h,S,m,u,A,$,I,D,x,y,Z,U,G,H,z,q,X,Q,w,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ne,ie,se,ae,le,re,ce,O,Xe,Qe,we,Je,ke,Ve,Ye,Be,je,Ke,We,e_,__,o_,t_,n_,i_,s_,a_,l_,r_,c_,O_,d_,u_,P_;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Name...",i="Pool type",s="-- Select a pool type --",c="Applications",d="Pools should be associated with an application tag",P="Quotas",p="Max bytes",R="Leave it blank or specify 0 to disable this quota.",h="A valid quota should be greater than 0.",S="e.g., 10GiB",m="Max objects",u="Leave it blank or specify 0 to disable this quota.",A="A valid quota should be greater than 0.",$="This field is required!",I="The chosen Ceph pool name is already in use.",D="It's not possible to create an RBD pool with '/' in the name. Please change the name or remove 'rbd' from the applications list.",x="Pool name can only contain letters, numbers, '.', '-', '_' or '/'.",y="This field is required!",Z="PG Autoscale",U="Placement groups",G="Calculation help",H="This field is required!",z="At least one placement group is needed!",q="Your cluster can't handle this many PGs. Please recalculate the PG amount needed.",X="The current PGs settings were calculated for you, you should make sure the values suit your needs before submit.",Q="Replicated size",w="Minimum: " + "\ufffd0\ufffd" + "",J="Maximum: " + "\ufffd0\ufffd" + "",k="The size specified is out of range. A value from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + " is usable.",V="A size of 1 will not create a replication of the object. 
The 'Replicated size' includes the object itself.",Y="Flags",B="EC Overwrites",j="CRUSH",K="Erasure code profile",N="This profile can't be deleted as it is in use.",W="Loading...",ee="-- No erasure code profile available --",_e="-- Select an erasure code profile --",oe="Profile",te="Used by pools",ne="Profile is not in use.",ie="Crush ruleset",se="A new crush ruleset will be implicitly created.",ae="Crush ruleset",le="There are no rules.",re="-- Select a crush rule --",ce="Placement and\n replication strategies or distribution policies that allow to\n specify how CRUSH places data replicas.",O="This rule can't be deleted as it is in use.",Xe="Crush rule",Qe="Crush steps",we="Used by pools",Je="Rule is not in use.",ke="This field is required!",Ve="The rule can't be used in the current cluster as it has too few OSDs to meet the minimum required OSD by this rule.",Ye="Compression",Be="Mode",je="Algorithm",Ke="Minimum blob size",We="e.g., 128KiB",e_="Maximum blob size",__="e.g., 512KiB",o_="Ratio",t_="Compression ratio",n_="Loading...",i_="-- No erasure compression algorithm available --",s_="Value should be greater than 0",a_="Value should be less than the maximum blob size",l_="Size must be a number or in a valid format. eg: 5 GiB",r_="Value should be greater than 0",c_="Value should be greater than the minimum blob size",O_="Size must be a number or in a valid format. eg: 5 GiB",d_="Value should be between 0.0 and 1.0",u_="Size must be a number or in a valid format. eg: 5 GiB",P_="The value should be greater or equal to 0",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","form","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],n,[1,"card-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],_,[1,"cd-col-form-input"],["id","name","name","name","type","text","placeholder",o,"formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","poolType",1,"cd-col-form-label","required"],i,["id","poolType","formControlName","poolType","name","poolType",1,"form-select"],["ngValue",""],s,[3,"value",4,"ngFor","ngForOf"],[4,"ngIf"],["for","applications",1,"cd-col-form-label"],c,["id","applications",3,"customBadges","customBadgeValidators","messages","data","options","selectionLimit","selection"],["title",d,3,"class",4,"ngIf"],["formGroupName","compression",4,"ngIf"],P,["for","max_bytes",1,"cd-col-form-label"],p,R,h,["id","max_bytes","name","max_bytes","type","text","formControlName","max_bytes","placeholder",S,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["for","max_objects",1,"cd-col-form-label"],m,u,A,["id","max_objects","min","0","name","max_objects","type","number","formControlName","max_objects",1,"form-control"],[3,"hidden"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],$,I,D,x,[3,"value"],y,["for","pgAutoscaleMode",1,"cd-col-form-label"],Z,["id","pgAutoscaleMode","name","pgAutoscaleMode","formControlName","pgAutoscaleMode",1,"form-select"],["class","form-group row",4,"ngIf"],["for","pgNum",1,"cd-col-form-label","required"],U,["id","pgNum","name","pgNum","formControlName","pgNum","min","1","type","number","required","",1,"form-control",3,"focus","blur"],[1,"form-text","text-muted"],["section","pgs","docText",G],["class","form-text 
text-muted",4,"ngIf"],H,z,q,X,["for","size",1,"cd-col-form-label","required"],Q,["id","size","name","size","type","number","formControlName","size",1,"form-control",3,"max","min"],["class","text-warning-dark",4,"ngIf"],[1,"list-inline"],w,J,k,[1,"text-warning-dark"],V,[1,"cd-col-form-label"],Y,[1,"custom-control","custom-checkbox"],["type","checkbox","id","ec-overwrites","formControlName","ecOverwrites",1,"custom-control-input"],["for","ec-overwrites",1,"custom-control-label"],B,["title",d],j,["for","erasureProfile",1,"cd-col-form-label"],K,[1,"input-group","mb-1"],["id","erasureProfile","name","erasureProfile","formControlName","erasureProfile",1,"form-select"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"ngClass","click"],["aria-hidden","true",3,"ngClass"],["class","btn btn-light","type","button",3,"click",4,"ngIf"],["class","btn btn-light","type","button","ngbTooltip",N,"triggers","manual",3,"click",4,"ngIf"],["class","form-text text-muted","id","ecp-info-block",4,"ngIf"],W,[3,"ngValue"],ee,_e,["type","button",1,"btn","btn-light",3,"click"],["type","button","ngbTooltip",N,"triggers","manual",1,"btn","btn-light",3,"click"],["ecpDeletionBtn","ngbTooltip"],["id","ecp-info-block",1,"form-text","text-muted"],["ngbNav","",1,"nav-tabs"],["ecpInfoTabs","ngbNav"],["ngbNavItem","ecp-info"],["ngbNavLink",""],oe,["ngbNavContent",""],["ngbNavItem","used-by-pools"],te,[3,"ngbNavOutlet"],[3,"renderObjects","hideKeys","data","autoReload"],["ecpIsNotUsed",""],[4,"ngIf","ngIfElse"],ne,[4,"ngFor","ngForOf"],["for","crushRule",1,"cd-col-form-label"],ie,se,ae,["noRules",""],le,[1,"input-group"],["id","crushRule","formControlName","crushRule","name","crushSet",1,"form-select"],re,["id","crush-info-button","type","button","ngbTooltip",ce,1,"btn","btn-light",3,"ngClass","click"],["class","btn btn-light","type","button","ngbTooltip",O,"triggers","manual",3,"click",4,"ngIf"],["class","form-text 
text-muted","id","crush-info-block",4,"ngIf"],["type","button","ngbTooltip",O,"triggers","manual",1,"btn","btn-light",3,"click"],["crushDeletionBtn","ngbTooltip"],["id","crush-info-block",1,"form-text","text-muted"],["crushInfoTabs","ngbNav"],["ngbNavItem","crush-rule-info"],Xe,["ngbNavItem","crush-rule-steps"],Qe,we,["ruleIsNotUsed",""],Je,ke,Ve,["formGroupName","compression"],Ye,["for","mode",1,"cd-col-form-label"],Be,["id","mode","name","mode","formControlName","mode",1,"form-select"],["for","algorithm",1,"cd-col-form-label"],je,["id","algorithm","name","algorithm","formControlName","algorithm",1,"form-select"],["for","minBlobSize",1,"cd-col-form-label"],Ke,["id","minBlobSize","name","minBlobSize","formControlName","minBlobSize","type","text","min","0","placeholder",We,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","maxBlobSize",1,"cd-col-form-label"],e_,["id","maxBlobSize","type","text","min","0","formControlName","maxBlobSize","placeholder",__,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","ratio",1,"cd-col-form-label"],o_,["id","ratio","name","ratio","formControlName","ratio","type","number","min","0","max","1","step","0.1","placeholder",t_,1,"form-control"],n_,i_,s_,a_,l_,r_,c_,O_,d_,u_,P_]},template:function(_,o){1&_&&e.YNc(0,_n,71,34,"div",0),2&_&&e.Q6J("cdFormLoading",o.loading)},dependencies:[C.mk,C.sg,C.O5,f.uN,f.Pz,f.nv,f.Vx,f.tO,f.Dy,Pe.S,Io.m,Do.K,fe.p,ye.b,Ee.U,xo.Q,yo.y,ge.o,pe.b,Re.P,me.V,l._Y,l.YN,l.Kr,l.Fj,l.wV,l.Wl,l.EJ,l.JJ,l.JL,l.Q7,l.qQ,l.Fd,l.sg,l.u,l.x0,f._L,Zo.d,C.rS,Ce.m],styles:[".icon-warning-color[_ngcontent-%COMP%]{margin-left:3px}"]}),t})();var on=r(19773),tn=r(20687),nn=r(68136),he=r(69158),Se=r(59019),L=r(99466),sn=r(91801),an=r(68774),ln=r(66369),Ge=r(38047),Te=r(51847);class rn{constructor(n){this.pool_name=n}}var cn=r(64724),On=r(60251),He=r(76317),dn=r(94928),un=r(23240),ze=r(51295),Pn=r(59376),fn=r(42176);function En(t,n){if(1&t&&e._UZ(0,"cd-table-key-value",12),2&t){const _=e.oxw(2);e.Q6J("renderObjects",!0)("data",_.poolDetails)("autoReload",!1)}}function gn(t,n){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.MGl("grafanaPath","ceph-pool-detail?var-pool_name=",_.selection.pool_name,""),e.Q6J("type","metrics")}}function pn(t,n){1&t&&(e.ynx(0,13),e.TgZ(1,"a",5),e.SDv(2,14),e.qZA(),e.YNc(3,gn,1,2,"ng-template",7),e.BQk())}function Rn(t,n){if(1&t&&e._UZ(0,"cd-rbd-configuration-table",18),2&t){const _=e.oxw(3);e.Q6J("data",_.selectedPoolConfiguration)}}function mn(t,n){1&t&&(e.ynx(0,16),e.TgZ(1,"a",5),e.SDv(2,17),e.qZA(),e.YNc(3,Rn,1,1,"ng-template",7),e.BQk())}function Cn(t,n){if(1&t&&e._UZ(0,"cd-table",21),2&t){const _=e.oxw(3);e.Q6J("data",_.cacheTiers)("columns",_.cacheTierColumns)("autoSave",!1)}}function Mn(t,n){1&t&&(e.ynx(0,19),e.TgZ(1,"a",5),e.SDv(2,20),e.qZA(),e.YNc(3,Cn,1,3,"ng-template",7),e.BQk())}function hn(t,n){if(1&t&&(e.ynx(0,1),e.TgZ(1,"nav",2,3),e.ynx(3,4),e.TgZ(4,"a",5),e.SDv(5,6),e.qZA(),e.YNc(6,En,1,3,"ng-template",7),e.BQk(),e.YNc(7,pn,4,0,"ng-container",8),e.YNc(8,mn,4,0,"ng-container",9),e.YNc(9,Mn,4,0,"ng-container",10),e.qZA(),e._UZ(10,"div",11),e.BQk()),2&t){const _=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.permissions.grafana.read),e.xp6(1),e.Q6J("ngIf","replicated"===o.selection.type),e.xp6(1),e.Q6J("ngIf",(null==o.selection.tiers?null:o.selection.tiers.length)>0),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Sn=(()=>{class 
t{constructor(_){this.poolService=_,this.cacheTierColumns=[],this.omittedPoolAttributes=["cdExecuting","cdIsBinary","stats"],this.cacheTierColumns=[{prop:"pool_name",name:"Name",flexGrow:3},{prop:"cache_mode",name:"Cache Mode",flexGrow:2},{prop:"cache_min_evict_age",name:"Min Evict Age",flexGrow:2},{prop:"cache_min_flush_age",name:"Min Flush Age",flexGrow:2},{prop:"target_max_bytes",name:"Target Max Bytes",flexGrow:2},{prop:"target_max_objects",name:"Target Max Objects",flexGrow:2}]}ngOnChanges(){this.selection&&(this.poolService.getConfiguration(this.selection.pool_name).subscribe(_=>{ze.T.updateChanged(this,{selectedPoolConfiguration:_})}),ze.T.updateChanged(this,{poolDetails:g().omit(this.selection,this.omittedPoolAttributes)}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ue.q))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-details"]],inputs:{cacheTiers:"cacheTiers",permissions:"permissions",selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let n,_,o,i,s;return n="Details",_="Performance Details",o="Pool details",i="Configuration",s="Cache Tiers Details",[["cdTableDetail","",4,"ngIf"],["cdTableDetail",""],["ngbNav","","cdStatefulTab","pool-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],n,["ngbNavContent",""],["ngbNavItem","performance-details",4,"ngIf"],["ngbNavItem","configuration",4,"ngIf"],["ngbNavItem","cache-tiers-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"renderObjects","data","autoReload"],["ngbNavItem","performance-details"],_,["title",o,"uid","-xyV8KCiz","grafanaStyle","three",3,"grafanaPath","type"],["ngbNavItem","configuration"],i,[3,"data"],["ngbNavItem","cache-tiers-details"],s,["columnMode","flex",3,"data","columns","autoSave"]]},template:function(_,o){1&_&&e.YNc(0,hn,11,4,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},dependencies:[C.O5,f.uN,f.Pz,f.nv,f.Vx,f.tO,f.Dy,He.F,Se.a,ye.b,Pn.m,fn.P],changeDetection:0}),t})();const Tn=["poolUsageTpl"],Ln=["poolConfigurationSourceTpl"];function An(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",9,10),e.NdJ("fetchData",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.taskListService.fetch())})("setExpandedRow",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.setExpandedRow(i))})("updateSelection",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.updateSelection(i))}),e._UZ(2,"cd-table-actions",11)(3,"cd-pool-details",12),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.pools)("columns",_.columns)("hasDetails",!0)("status",_.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",_.permissions.pool)("selection",_.selection)("tableActions",_.tableActions),e.xp6(1),e.Q6J("selection",_.expandedRow)("permissions",_.permissions)("cacheTiers",_.cacheTiers)}}function Fn(t,n){1&t&&e._UZ(0,"cd-grafana",14),2&t&&e.Q6J("grafanaPath","ceph-pools-overview?")("type","metrics")}function Nn(t,n){1&t&&(e.ynx(0,2),e.TgZ(1,"a",3),e.SDv(2,13),e.qZA(),e.YNc(3,Fn,1,2,"ng-template",5),e.BQk())}function bn(t,n){if(1&t&&e._UZ(0,"cd-usage-bar",16),2&t){const _=e.oxw().row;e.Q6J("total",_.stats.bytes_used.latest+_.stats.avail_raw.latest)("used",_.stats.bytes_used.latest)("title",_.pool_name)}}function vn(t,n){if(1&t&&e.YNc(0,bn,1,3,"cd-usage-bar",15),2&t){const _=n.row;e.Q6J("ngIf",null==_.stats||null==_.stats.avail_raw?null:_.stats.avail_raw.latest)}}const Le="pool";let $n=(()=>{class t extends 
nn.o{constructor(_,o,i,s,c,d,P,p,R,h,S){super(),this.poolService=_,this.taskWrapper=o,this.ecpService=i,this.authStorageService=s,this.taskListService=c,this.modalService=d,this.pgCategoryService=P,this.dimlessPipe=p,this.urlBuilder=R,this.configurationService=h,this.actionLabels=S,this.selection=new an.r,this.executingTasks=[],this.tableStatus=new he.E,this.cacheTiers=[],this.monAllowPoolDelete=!1,this.permissions=this.authStorageService.getPermissions(),this.tableActions=[{permission:"create",icon:b.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE},{permission:"update",icon:b.P.edit,routerLink:()=>this.urlBuilder.getEdit(encodeURIComponent(this.selection.first().pool_name)),name:this.actionLabels.EDIT},{permission:"delete",icon:b.P.destroy,click:()=>this.deletePoolModal(),name:this.actionLabels.DELETE,disable:this.getDisableDesc.bind(this)}],this.permissions.configOpt.read&&this.configurationService.get("mon_allow_pool_delete").subscribe(m=>{if(g().has(m,"value")){const u=g().find(m.value,A=>"mon"===A.section)||{value:!1};this.monAllowPoolDelete="true"===u.value}})}ngOnInit(){const _=(o,i,s)=>g().get(i,o)>g().get(s,o)?1:-1;this.columns=[{prop:"pool_name",name:"Name",flexGrow:4,cellTransformation:L.e.executing},{prop:"data_protection",name:"Data Protection",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-gray"},flexGrow:1.3},{prop:"application_metadata",name:"Applications",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-primary"},flexGrow:1.5},{prop:"pg_status",name:"PG Status",flexGrow:1.2,cellClass:({row:o,column:i,value:s})=>this.getPgStatusCellClass(o,i,s)},{prop:"crush_rule",name:"Crush Ruleset",isHidden:!0,flexGrow:2},{name:"Usage",prop:"usage",cellTemplate:this.poolUsageTpl,flexGrow:1.2},{prop:"stats.rd_bytes.rates",name:"Read bytes",comparator:(o,i,s,c)=>_("stats.rd_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.wr_bytes.rates",name:"Write bytes",comparator:(o,i,s,c)=>_("stats.wr_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.rd.rate",name:"Read ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond},{prop:"stats.wr.rate",name:"Write ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond}],this.taskListService.init(()=>this.ecpService.list().pipe((0,on.zg)(o=>(this.ecProfileList=o,this.poolService.getList()))),void 0,o=>{this.pools=this.transformPoolsData(o),this.tableStatus=new he.E},()=>{this.table.reset(),this.tableStatus=new he.E(sn.T.ValueException)},o=>o.name.startsWith(`${Le}/`),(o,i)=>i.metadata.pool_name===o.pool_name,{default:o=>new rn(o.pool_name)})}updateSelection(_){this.selection=_}deletePoolModal(){const _=this.selection.first().pool_name;this.modalService.show(Fe.M,{itemDescription:"Pool",itemNames:[_],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new v.R(`${Le}/${M.MQ.DELETE}`,{pool_name:_}),call:this.poolService.delete(_)})})}getPgStatusCellClass(_,o,i){return{"text-right":!0,[`pg-${this.pgCategoryService.getTypeByStates(i)}`]:!0}}getErasureCodeProfile(_){let o="";return g().forEach(this.ecProfileList,i=>{i.name===_&&(o=`EC: ${i.k}+${i.m}`)}),o}transformPoolsData(_){const o=["bytes_used","max_avail","avail_raw","percent_used","rd_bytes","wr_bytes","rd","wr"],i={latest:0,rate:0,rates:[]};return g().forEach(_,s=>{s.pg_status=this.transformPgStatus(s.pg_status);const 
c={};g().forEach(o,d=>{c[d]=s.stats&&s.stats[d]?s.stats[d]:i}),s.stats=c,s.usage=c.percent_used.latest,!s.cdExecuting&&s.pg_num+s.pg_placement_num!==s.pg_num_target+s.pg_placement_num_target&&(s.cdExecuting="Updating"),["rd_bytes","wr_bytes"].forEach(d=>{s.stats[d].rates=s.stats[d].rates.map(P=>P[1])}),s.cdIsBinary=!0,"erasure"===s.type&&(s.data_protection=this.getErasureCodeProfile(s.erasure_code_profile)),"replicated"===s.type&&(s.data_protection=`replica: \xd7${s.size}`)}),_}transformPgStatus(_){const o=[];return g().forEach(_,(i,s)=>{o.push(`${i} ${s}`)}),o.join(", ")}getSelectionTiers(){if(typeof this.expandedRow<"u"){const _=this.expandedRow.tiers;this.cacheTiers=this.pools.filter(o=>_.includes(o.pool))}}getDisableDesc(){return!this.selection?.hasSelection||!this.monAllowPoolDelete&&"Pool deletion is disabled by the mon_allow_pool_delete configuration setting."}setExpandedRow(_){super.setExpandedRow(_),this.getSelectionTiers()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ue.q),e.Y36(de.P),e.Y36(Me),e.Y36(xe.j),e.Y36(Ge.j),e.Y36(De.Z),e.Y36(tn.j),e.Y36(ln.n),e.Y36(Te.F),e.Y36(cn.e),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-list"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Se.a,5),e.Gf(Tn,7),e.Gf(Ln,5)),2&_){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.poolUsageTpl=i.first),e.iGM(i=e.CRH())&&(o.poolConfigurationSourceTpl=i.first)}},features:[e._Bn([Ge.j,{provide:Te.F,useValue:new Te.F(Le)}]),e.qOj],decls:10,vars:2,consts:function(){let n,_,o;return n="Pools List",_="Overall Performance",o="Ceph pools overview",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],n,["ngbNavContent",""],["ngbNavItem","",4,"cdScope"],[3,"ngbNavOutlet"],["poolUsageTpl",""],["id","pool-list","selectionType","single",3,"data","columns","hasDetails","status","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],["id","pool-list-actions",1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","","id","pool-list-details",3,"selection","permissions","cacheTiers"],_,["title",o,"uid","z99hzWtmk","grafanaStyle","two",3,"grafanaPath","type"],["decimals","2",3,"total","used","title",4,"ngIf"],["decimals","2",3,"total","used","title"]]},template:function(_,o){if(1&_&&(e.TgZ(0,"nav",0,1),e.ynx(2,2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,An,4,11,"ng-template",5),e.BQk(),e.YNc(6,Nn,4,0,"ng-container",6),e.qZA(),e._UZ(7,"div",7),e.YNc(8,vn,1,1,"ng-template",null,8,e.W1O)),2&_){const i=e.MAs(1);e.xp6(6),e.Q6J("cdScope","grafana"),e.xp6(1),e.Q6J("ngbNavOutlet",i)}},dependencies:[C.O5,f.uN,f.Pz,f.nv,f.Vx,f.tO,f.Dy,On.O,He.F,Se.a,dn.K,un.w,Sn],styles:["cd-pool-list .pg-clean{color:#008a00} cd-pool-list .pg-working{color:#25828e} cd-pool-list .pg-warning{color:#d48200} cd-pool-list .pg-unknown{color:#dc3545}"]}),t})(),qe=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[g_.t,C.ez,f.Oz,f_.m,Oe.Bz,l.UX,f.HK,E_.BlockModule]}),t})();const In=[{path:"",component:$n},{path:M.MQ.CREATE,component:Ue,data:{breadcrumbs:M.Qn.CREATE}},{path:`${M.MQ.EDIT}/:name`,component:Ue,data:{breadcrumbs:M.Qn.EDIT}}];let Dn=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[qe,Oe.Bz.forChild(In)]}),t})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/25.9d84971ea743706b.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/25.9d84971ea743706b.js new 
file mode 100644 index 000000000..a9bdf87f6 --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/25.9d84971ea743706b.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[25],{39025:(mt,Ae,p)=>{p.r(Ae),p.d(Ae,{BlockModule:()=>yt,RoutedBlockModule:()=>Or});var c=p(88692),r=p(20092),m=p(54247),ne=p(62946),F=p(51389),Ne=p(37496),L=p(79512),j=p(4222),re=p(54462),Pe=p(44466),le=p(23815),C=p.n(le),ce=p(35758),D=p(64762),ie=p(35732),V=p(93523),e=p(64537);let X=class{constructor(s){this.http=s}listTargets(){return this.http.get("api/iscsi/target")}getTarget(s){return this.http.get(`api/iscsi/target/${s}`)}updateTarget(s,t){return this.http.put(`api/iscsi/target/${s}`,t,{observe:"response"})}status(){return this.http.get("ui-api/iscsi/status")}settings(){return this.http.get("ui-api/iscsi/settings")}version(){return this.http.get("ui-api/iscsi/version")}portals(){return this.http.get("ui-api/iscsi/portals")}createTarget(s){return this.http.post("api/iscsi/target",s,{observe:"response"})}deleteTarget(s){return this.http.delete(`api/iscsi/target/${s}`,{observe:"response"})}getDiscovery(){return this.http.get("api/iscsi/discoveryauth")}updateDiscovery(s){return this.http.put("api/iscsi/discoveryauth",s)}overview(){return this.http.get("ui-api/iscsi/overview")}};X.\u0275fac=function(s){return new(s||X)(e.LFG(ie.eN))},X.\u0275prov=e.Yz7({token:X,factory:X.\u0275fac,providedIn:"root"}),X=(0,D.gn)([V.o,(0,D.w6)("design:paramtypes",[ie.eN])],X);var Fe=p(88002),De=p(76189),v=p(19358),be=p(34089);let H=class extends De.S{constructor(s,t){super(),this.http=s,this.rbdConfigurationService=t}isRBDPool(s){return-1!==C().indexOf(s.application_metadata,"rbd")&&!s.pool_name.includes("/")}create(s){return this.http.post("api/block/image",s,{observe:"response"})}delete(s){return this.http.delete(`api/block/image/${s.toStringEncoded()}`,{observe:"response"})}update(s,t){return this.http.put(`api/block/image/${s.toStringEncoded()}`,t,{observe:"response"})}get(s){return this.http.get(`api/block/image/${s.toStringEncoded()}`)}list(s){return this.http.get("api/block/image",{params:s,headers:{Accept:this.getVersionHeaderValue(2,0)},observe:"response"}).pipe((0,Fe.U)(t=>t.body.map(o=>(o.value.map(i=>(i.configuration&&i.configuration.map(_=>Object.assign(_,this.rbdConfigurationService.getOptionByName(_.name))),i)),o.headers=t.headers,o))))}copy(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/copy`,t,{observe:"response"})}flatten(s){return this.http.post(`api/block/image/${s.toStringEncoded()}/flatten`,null,{observe:"response"})}defaultFeatures(){return this.http.get("api/block/image/default_features")}cloneFormatVersion(){return this.http.get("api/block/image/clone_format_version")}createSnapshot(s,t,o){const i={snapshot_name:t,mirrorImageSnapshot:o};return this.http.post(`api/block/image/${s.toStringEncoded()}/snap`,i,{observe:"response"})}renameSnapshot(s,t,o){const i={new_snap_name:o};return this.http.put(`api/block/image/${s.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}protectSnapshot(s,t,o){const i={is_protected:o};return this.http.put(`api/block/image/${s.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}rollbackSnapshot(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/snap/${t}/rollback`,null,{observe:"response"})}cloneSnapshot(s,t,o){return this.http.post(`api/block/image/${s.toStringEncoded()}/snap/${t}/clone`,o,{observe:"response"})}deleteSnapshot(s,t){return 
this.http.delete(`api/block/image/${s.toStringEncoded()}/snap/${t}`,{observe:"response"})}listTrash(){return this.http.get("api/block/image/trash/")}createNamespace(s,t){return this.http.post(`api/block/pool/${s}/namespace`,{namespace:t},{observe:"response"})}listNamespaces(s){return this.http.get(`api/block/pool/${s}/namespace/`)}deleteNamespace(s,t){return this.http.delete(`api/block/pool/${s}/namespace/${t}`,{observe:"response"})}moveTrash(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/move_trash`,{delay:t},{observe:"response"})}purgeTrash(s){return this.http.post(`api/block/image/trash/purge/?pool_name=${s}`,null,{observe:"response"})}restoreTrash(s,t){return this.http.post(`api/block/image/trash/${s.toStringEncoded()}/restore`,{new_image_name:t},{observe:"response"})}removeTrash(s,t=!1){return this.http.delete(`api/block/image/trash/${s.toStringEncoded()}/?force=${t}`,{observe:"response"})}};H.\u0275fac=function(s){return new(s||H)(e.LFG(ie.eN),e.LFG(be.n))},H.\u0275prov=e.Yz7({token:H,factory:H.\u0275fac,providedIn:"root"}),(0,D.gn)([(0,D.fM)(1,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[v.N,String,Boolean]),(0,D.w6)("design:returntype",void 0)],H.prototype,"createSnapshot",null),(0,D.gn)([(0,D.fM)(2,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[v.N,String,String]),(0,D.w6)("design:returntype",void 0)],H.prototype,"renameSnapshot",null),(0,D.gn)([(0,D.fM)(2,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[v.N,String,Boolean]),(0,D.w6)("design:returntype",void 0)],H.prototype,"protectSnapshot",null),(0,D.gn)([(0,D.fM)(1,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[v.N,String]),(0,D.w6)("design:returntype",void 0)],H.prototype,"restoreTrash",null),H=(0,D.gn)([V.o,(0,D.w6)("design:paramtypes",[ie.eN,be.n])],H);var N=p(7022),x=p(14745),T=p(65862),k=p(93614),Z=p(95463),z=p(90070),h=p(48168),E=p(76111),u=p(32337),f=p(60312),A=p(30839),M=p(87925),B=p(94276),K=p(56310),J=p(41582);function no(n,s){if(1&n&&(e.TgZ(0,"option",6),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("ngValue",t),e.xp6(1),e.Oqu(t)}}function io(n,s){if(1&n&&(e.TgZ(0,"select",5),e._UZ(1,"option",6),e.YNc(2,no,2,2,"option",7),e.qZA()),2&n){const t=e.oxw();e.s9C("id",t.setting),e.s9C("name",t.setting),e.Q6J("formControlName",t.setting),e.xp6(1),e.Q6J("ngValue",null),e.xp6(1),e.Q6J("ngForOf",t.limits.values)}}function so(n,s){if(1&n&&e._UZ(0,"input",10),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function _o(n,s){if(1&n&&e._UZ(0,"input",11),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function ao(n,s){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"div",12),e._UZ(3,"input",13),e.TgZ(4,"label",14),e._uU(5,"Yes"),e.qZA()(),e.TgZ(6,"div",12),e._UZ(7,"input",13),e.TgZ(8,"label",14),e._uU(9,"No"),e.qZA()(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(3),e.Q6J("id",t.setting+"True")("value",!0)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"True"),e.xp6(3),e.Q6J("id",t.setting+"False")("value",!1)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"False")}}function ro(n,s){if(1&n&&(e.TgZ(0,"span"),e.YNc(1,so,1,1,"input",8),e.YNc(2,_o,1,1,"input",9),e.YNc(3,ao,10,8,"ng-container",3),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf","int"===t.limits.type),e.xp6(1),e.Q6J("ngIf","str"===t.limits.type),e.xp6(1),e.Q6J("ngIf","bool"===t.limits.type)}}function lo(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,16),e.BQk(),e.qZA()),2&n){const 
t=e.oxw();e.xp6(2),e.pQV(t.limits.min),e.QtT(2)}}function co(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,17),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.max),e.QtT(2)}}let gt=(()=>{class n{ngOnInit(){const t=[];"min"in this.limits&&t.push(r.kI.min(Number(this.limits.min))),"max"in this.limits&&t.push(r.kI.max(Number(this.limits.max))),this.settingsForm.get(this.setting).setValidators(t)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-setting"]],inputs:{settingsForm:"settingsForm",formDir:"formDir",setting:"setting",limits:"limits"},decls:7,vars:7,consts:function(){let s,t;return s="Must be greater than or equal to " + "\ufffd0\ufffd" + ".",t="Must be less than or equal to " + "\ufffd0\ufffd" + ".",[[1,"form-group",3,"formGroup"],[1,"col-form-label",3,"for"],["class","form-control",3,"id","name","formControlName",4,"ngIf"],[4,"ngIf"],["class","invalid-feedback",4,"ngIf"],[1,"form-control",3,"id","name","formControlName"],[3,"ngValue"],[3,"ngValue",4,"ngFor","ngForOf"],["type","number","class","form-control",3,"formControlName",4,"ngIf"],["type","text","class","form-control",3,"formControlName",4,"ngIf"],["type","number",1,"form-control",3,"formControlName"],["type","text",1,"form-control",3,"formControlName"],[1,"custom-control","custom-radio","custom-control-inline"],["type","radio",1,"custom-control-input",3,"id","value","formControlName"],[1,"custom-control-label",3,"for"],[1,"invalid-feedback"],s,t]},template:function(t,o){1&t&&(e.TgZ(0,"div",0)(1,"label",1),e._uU(2),e.qZA(),e.YNc(3,io,3,5,"select",2),e.YNc(4,ro,4,3,"span",3),e.YNc(5,lo,3,1,"span",4),e.YNc(6,co,3,1,"span",4),e.qZA()),2&t&&(e.Q6J("formGroup",o.settingsForm),e.xp6(1),e.s9C("for",o.setting),e.xp6(1),e.Oqu(o.setting),e.xp6(1),e.Q6J("ngIf","enum"===o.limits.type),e.xp6(1),e.Q6J("ngIf","enum"!==o.limits.type),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"min")),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"max")))},dependencies:[c.sg,c.O5,r.YN,r.Kr,r.Fj,r.wV,r.EJ,r._,r.JJ,r.JL,r.sg,r.u,M.o,B.b,K.P,J.V]}),n})();var Je=p(88820);function po(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function uo(n,s){if(1&n&&(e.TgZ(0,"span")(1,"legend",10),e.SDv(2,21),e.qZA(),e.TgZ(3,"div",12)(4,"div",13)(5,"label",22),e.SDv(6,23),e.qZA(),e._UZ(7,"input",24),e.YNc(8,po,2,0,"span",25),e.qZA()(),e.TgZ(9,"div",12)(10,"div",13)(11,"label",26),e.SDv(12,27),e.qZA(),e._UZ(13,"input",28),e.qZA()()()),2&n){const t=e.oxw(),o=e.MAs(9);e.xp6(8),e.Q6J("ngIf",t.settingsForm.showError("lun",o,"required"))}}function mo(n,s){if(1&n&&(e.TgZ(0,"option",31),e._uU(1),e.ALo(2,"iscsiBackstore"),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(e.lcZ(2,2,t))}}function go(n,s){if(1&n&&(e.TgZ(0,"div",12)(1,"div",13),e._UZ(2,"cd-iscsi-setting",33),e.qZA()()),2&n){const t=s.$implicit,o=e.oxw(2).$implicit,i=e.oxw(),_=e.MAs(9);e.xp6(2),e.Q6J("settingsForm",i.settingsForm)("formDir",_)("setting",t.key)("limits",i.getDiskControlLimits(o,t.key))}}function To(n,s){if(1&n&&(e.ynx(0),e.YNc(1,go,3,4,"div",32),e.ALo(2,"keyvalue"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngForOf",e.lcZ(2,1,o.disk_default_controls[t]))}}function fo(n,s){if(1&n&&(e.ynx(0),e.YNc(1,To,3,3,"ng-container",9),e.BQk()),2&n){const t=s.$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngIf",o.settingsForm.value.backstore===t)}}let Co=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={backstore:new 
r.p4(this.imagesSettings[this.image].backstore),lun:new r.p4(this.imagesSettings[this.image].lun),wwn:new r.p4(this.imagesSettings[this.image].wwn)};C().forEach(this.backstores,o=>{const i=this.imagesSettings[this.image][o]||{};C().forIn(this.disk_default_controls[o],(_,a)=>{t[a]=new r.p4(i[a])})}),this.settingsForm=new Z.d(t)}getDiskControlLimits(t,o){return this.disk_controls_limits?this.disk_controls_limits[t][o]:{type:"int"}}save(){const t=this.settingsForm.controls.backstore.value,o=this.settingsForm.controls.lun.value,i=this.settingsForm.controls.wwn.value,_={};C().forIn(this.settingsForm.controls,(a,l)=>{""!==a.value&&null!==a.value&&l in this.disk_default_controls[this.settingsForm.value.backstore]&&(_[l]=a.value,C().forEach(this.backstores,d=>{d!==t&&l in(this.imagesSettings[this.image][d]||{})&&(this.imagesSettings[this.image][d][l]=a.value)}))}),this.imagesSettings[this.image].backstore=t,this.imagesSettings[this.image].lun=o,this.imagesSettings[this.image].wwn=i,this.imagesSettings[this.image][t]=_,this.imagesSettings={...this.imagesSettings},this.control.updateValueAndValidity({emitEvent:!1}),this.activeModal.close()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(X),e.Y36(L.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-image-settings-modal"]],decls:25,vars:8,consts:function(){let s,t,o,i,_,a,l,d;return s="Configure",t="Changing these parameters from their default values is usually not necessary.",o="Settings",i="Backstore",_="Identifier",a="lun",l="wwn",d="This field is required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","settingsForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,[4,"ngIf"],[1,"cd-header"],o,[1,"form-group","row"],[1,"col-sm-12"],[1,"col-form-label"],i,["id","backstore","name","backstore","formControlName","backstore",1,"form-select"],[3,"value",4,"ngFor","ngForOf"],[4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],_,["for","lun",1,"col-form-label","required"],a,["type","number","id","lun","name","lun","formControlName","lun",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","wwn",1,"col-form-label"],l,["type","text","id","wwn","name","wwn","formControlName","wwn",1,"form-control"],[1,"invalid-feedback"],d,[3,"value"],["class","form-group row",4,"ngFor","ngForOf"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1)(2),e.SDv(3,2),e.BQk(),e._uU(4,"\xa0 "),e.TgZ(5,"small"),e._uU(6),e.qZA(),e.BQk(),e.ynx(7,3),e.TgZ(8,"form",4,5)(10,"div",6)(11,"p",7),e.SDv(12,8),e.qZA(),e.YNc(13,uo,14,1,"span",9),e.TgZ(14,"legend",10),e.SDv(15,11),e.qZA(),e.TgZ(16,"div",12)(17,"div",13)(18,"label",14),e.SDv(19,15),e.qZA(),e.TgZ(20,"select",16),e.YNc(21,mo,3,4,"option",17),e.qZA()()(),e.YNc(22,fo,2,1,"ng-container",18),e.qZA(),e.TgZ(23,"div",19)(24,"cd-form-button-panel",20),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA()()(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(6),e.Oqu(o.image),e.xp6(2),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngIf",o.api_version>=1),e.xp6(8),e.Q6J("ngForOf",o.backstores),e.xp6(1),e.Q6J("ngForOf",o.backstores),e.xp6(2),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},dependencies:[c.sg,c.O5,r._Y,r.YN,r.Kr,r.Fj,r.wV,r.EJ,r.JJ,r.JL,r.sg,r.u,f.z,A.p,M.o,B.b,K.P,J.V,gt,c.Nd,Je.V]}),n})();function 
So(n,s){if(1&n&&(e.TgZ(0,"div",12)(1,"div",13),e._UZ(2,"cd-iscsi-setting",14),e.qZA()()),2&n){const t=s.$implicit,o=e.oxw(),i=e.MAs(5);e.xp6(2),e.Q6J("settingsForm",o.settingsForm)("formDir",i)("setting",t.key)("limits",o.getTargetControlLimits(t.key))}}let Ro=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={};C().forIn(this.target_default_controls,(o,i)=>{t[i]=new r.p4(this.target_controls.value[i])}),this.settingsForm=new Z.d(t)}save(){const t={};C().forIn(this.settingsForm.controls,(o,i)=>{""===o.value||null===o.value||(t[i]=o.value)}),this.target_controls.setValue(t),this.activeModal.close()}getTargetControlLimits(t){return this.target_controls_limits?this.target_controls_limits[t]:["Yes","No"].includes(this.target_default_controls[t])?{type:"bool"}:{type:"int"}}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(X),e.Y36(L.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-iqn-settings-modal"]],decls:13,vars:7,consts:function(){let s,t;return s="Advanced Settings",t="Changing these parameters from their default values is usually not necessary.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","settingsForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,["class","form-group row",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"form-group","row"],[1,"col-sm-12"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p",7),e.SDv(8,8),e.qZA(),e.YNc(9,So,3,4,"div",9),e.ALo(10,"keyvalue"),e.qZA(),e.TgZ(11,"div",10)(12,"cd-form-button-panel",11),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA()()(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngForOf",e.lcZ(10,5,o.settingsForm.controls)),e.xp6(3),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},dependencies:[c.sg,r._Y,r.JL,r.sg,f.z,A.p,K.P,J.V,gt,c.Nd]}),n})();var pe=p(63285),Eo=p(39092),Ye=p(58039),Tt=p(4416);let Mo=(()=>{class n{constructor(t){this.ngControl=t}onInput(t){this.setValue(t)}setValue(t){t=C().isString(t)?t.trim():t,this.ngControl.control.setValue(t)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(r.a5))},n.\u0275dir=e.lG2({type:n,selectors:[["","cdTrim",""]],hostBindings:function(t,o){1&t&&e.NdJ("input",function(_){return o.onInput(_.target.value)})}}),n})();var ft=p(63622),ot=p(10545);function Oo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,41),e.qZA())}function ho(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,42),e.qZA())}function Ao(n,s){1&n&&(e.TgZ(0,"span",40),e.ynx(1),e.SDv(2,43),e.BQk(),e._UZ(3,"br"),e.ynx(4),e.SDv(5,44),e.BQk(),e._UZ(6,"br"),e.TgZ(7,"a",45),e.SDv(8,46),e.qZA()())}function Po(n,s){1&n&&(e.TgZ(0,"span",47),e.SDv(1,48),e.qZA())}const U=function(n){return[n]};function bo(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,a=i.$implicit,l=e.oxw(2);return e.KtG(l.removePortal(_,a))}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,U,o.icons.destroy))}}function Io(n,s){if(1&n&&(e.TgZ(0,"span",40),e.SDv(1,52),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.pQV(t.minimum_gateways),e.QtT(1)}}function 
No(n,s){if(1&n&&(e.TgZ(0,"div",55),e._uU(1),e.qZA()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(1),e.hij("lun: ",o.imagesSettings[t].lun,"")}}function Fo(n,s){if(1&n&&(e.ynx(0),e.SDv(1,56),e.ALo(2,"iscsiBackstore"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(2),e.pQV(e.lcZ(2,1,o.imagesSettings[t].backstore)),e.QtT(1)}}function Do(n,s){1&n&&(e.ynx(0),e.SDv(1,57),e.BQk())}function Lo(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.YNc(3,No,2,1,"div",53),e.TgZ(4,"button",51),e.NdJ("click",function(){const _=e.CHM(t).$implicit,a=e.oxw(2);return e.KtG(a.imageSettingsModal(_))}),e._UZ(5,"i",15),e.qZA(),e.TgZ(6,"button",51),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,a=i.$implicit,l=e.oxw(2);return e.KtG(l.removeImage(_,a))}),e._UZ(7,"i",15),e.qZA()(),e.TgZ(8,"span",47),e.YNc(9,Fo,3,3,"ng-container",54),e.YNc(10,Do,2,0,"ng-container",54),e.qZA(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(1),e.Q6J("ngIf",o.api_version>=1),e.xp6(2),e.Q6J("ngClass",e.VKq(6,U,o.icons.deepCheck)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,U,o.icons.destroy)),e.xp6(2),e.Q6J("ngIf",o.backstores.length>1),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.imagesSettings[t][o.imagesSettings[t].backstore]))}}function vo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,58),e.qZA())}function $o(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,59),e.qZA())}function Bo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,77),e.qZA())}function Go(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,78),e.qZA())}function yo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,79),e.qZA())}function xo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,80),e.qZA())}function Zo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,81),e.qZA())}function wo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,82),e.qZA())}function Ho(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,83),e.qZA())}function ko(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,84),e.qZA())}function Ko(n,s){if(1&n&&(e.TgZ(0,"div",60)(1,"div",8)(2,"label",61),e.ynx(3),e.SDv(4,62),e.BQk(),e.qZA(),e.TgZ(5,"div",11),e._UZ(6,"input",63),e.YNc(7,Bo,2,0,"span",16),e.YNc(8,Go,2,0,"span",16),e.qZA()(),e.TgZ(9,"div",8)(10,"label",64),e.ynx(11),e.SDv(12,65),e.BQk(),e.qZA(),e.TgZ(13,"div",11)(14,"div",12),e._UZ(15,"input",66)(16,"button",67)(17,"cd-copy-2-clipboard-button",68),e.qZA(),e.YNc(18,yo,2,0,"span",16),e.YNc(19,xo,2,0,"span",16),e.qZA()(),e.TgZ(20,"div",8)(21,"label",69),e.ynx(22),e.SDv(23,70),e.BQk(),e.qZA(),e.TgZ(24,"div",11),e._UZ(25,"input",71),e.YNc(26,Zo,2,0,"span",16),e.YNc(27,wo,2,0,"span",16),e.qZA()(),e.TgZ(28,"div",8)(29,"label",72),e.ynx(30),e.SDv(31,73),e.BQk(),e.qZA(),e.TgZ(32,"div",11)(33,"div",12),e._UZ(34,"input",74)(35,"button",75)(36,"cd-copy-2-clipboard-button",76),e.qZA(),e.YNc(37,Ho,2,0,"span",16),e.YNc(38,ko,2,0,"span",16),e.qZA()()()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("user",t,"pattern")),e.xp6(10),e.Q6J("ngIf",o.targetForm.showError("password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("password",t,"pattern")),e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"pattern")),e.xp6(10),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"pattern"))}}function qo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,114),e.qZA())}function Xo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,115),e.qZA())}function 
Qo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,116),e.qZA())}function zo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,117),e.qZA())}function Jo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,118),e.qZA())}function Yo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,119),e.qZA())}function Vo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,120),e.qZA())}function Uo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,121),e.qZA())}function jo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,122),e.qZA())}function Wo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,123),e.qZA())}function en(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,124),e.qZA())}function tn(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,a=i.$implicit,l=e.oxw(),d=l.$implicit,g=l.index,S=e.oxw(3);return e.KtG(S.removeInitiatorImage(d,_,g,a))}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,U,o.icons.destroy))}}function on(n,s){1&n&&(e.TgZ(0,"span"),e.SDv(1,125),e.qZA())}function nn(n,s){if(1&n&&(e.TgZ(0,"div",21)(1,"div",22)(2,"cd-select",126),e._UZ(3,"i",24),e.ynx(4),e.SDv(5,127),e.BQk(),e.qZA()()()),2&n){const t=e.oxw(),o=t.$implicit,i=t.index,_=e.oxw(3);e.xp6(2),e.Q6J("data",o.getValue("luns"))("options",_.imagesInitiatorSelections[i])("messages",_.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(4,U,_.icons.add))}}function sn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",91)(1,"div",5),e.ynx(2),e.SDv(3,92),e.BQk(),e._uU(4),e.TgZ(5,"button",93),e.NdJ("click",function(){const _=e.CHM(t).index,a=e.oxw(3);return e.KtG(a.removeInitiator(_))}),e.qZA()(),e.TgZ(6,"div",7)(7,"div",8)(8,"label",94),e.SDv(9,95),e.qZA(),e.TgZ(10,"div",11)(11,"input",96),e.NdJ("blur",function(){e.CHM(t);const i=e.oxw(3);return e.KtG(i.updatedInitiatorSelector())}),e.qZA(),e.YNc(12,qo,2,0,"span",16),e.YNc(13,Xo,2,0,"span",16),e.YNc(14,Qo,2,0,"span",16),e.qZA()(),e.ynx(15,60),e.TgZ(16,"div",8)(17,"label",97),e.SDv(18,98),e.qZA(),e.TgZ(19,"div",11),e._UZ(20,"input",99),e.YNc(21,zo,2,0,"span",16),e.YNc(22,Jo,2,0,"span",16),e.qZA()(),e.TgZ(23,"div",8)(24,"label",100),e.SDv(25,101),e.qZA(),e.TgZ(26,"div",11)(27,"div",12),e._UZ(28,"input",102)(29,"button",103)(30,"cd-copy-2-clipboard-button",104),e.qZA(),e.YNc(31,Yo,2,0,"span",16),e.YNc(32,Vo,2,0,"span",16),e.qZA()(),e.TgZ(33,"div",8)(34,"label",105),e.ynx(35),e.SDv(36,106),e.BQk(),e.qZA(),e.TgZ(37,"div",11),e._UZ(38,"input",107),e.YNc(39,Uo,2,0,"span",16),e.YNc(40,jo,2,0,"span",16),e.qZA()(),e.TgZ(41,"div",8)(42,"label",108),e.SDv(43,109),e.qZA(),e.TgZ(44,"div",11)(45,"div",12),e._UZ(46,"input",110)(47,"button",103)(48,"cd-copy-2-clipboard-button",104),e.qZA(),e.YNc(49,Wo,2,0,"span",16),e.YNc(50,en,2,0,"span",16),e.qZA()(),e.BQk(),e.TgZ(51,"div",8)(52,"label",111),e.SDv(53,112),e.qZA(),e.TgZ(54,"div",11),e.YNc(55,tn,5,4,"ng-container",20),e.YNc(56,on,2,0,"span",54),e.YNc(57,nn,6,6,"div",113),e.qZA()()()()}if(2&n){const t=s.$implicit,o=s.index;e.oxw(2);const i=e.MAs(2);e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("client_iqn")," 
"),e.xp6(8),e.Q6J("ngIf",t.showError("client_iqn",i,"notUnique")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"pattern")),e.xp6(6),e.Q6J("id","user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"pattern")),e.xp6(6),e.Q6J("id","password"+o),e.xp6(1),e.Q6J("cdPasswordButton","password"+o),e.xp6(1),e.Q6J("source","password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_password"+o),e.xp6(1),e.Q6J("cdPasswordButton","mutual_password"+o),e.xp6(1),e.Q6J("source","mutual_password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"pattern")),e.xp6(5),e.Q6J("ngForOf",t.getValue("luns")),e.xp6(1),e.Q6J("ngIf",t.getValue("cdIsInGroup")),e.xp6(1),e.Q6J("ngIf",!t.getValue("cdIsInGroup"))}}function _n(n,s){1&n&&(e.TgZ(0,"span",47),e.SDv(1,128),e.qZA())}function an(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",8)(1,"label",85),e.SDv(2,86),e.qZA(),e.TgZ(3,"div",87),e.YNc(4,sn,58,24,"div",88),e.TgZ(5,"div",21)(6,"div",22),e.YNc(7,_n,2,0,"span",17),e.TgZ(8,"button",89),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addInitiator(),e.KtG(!1)}),e._UZ(9,"i",24),e.ynx(10),e.SDv(11,90),e.BQk(),e.qZA()()(),e._UZ(12,"hr"),e.qZA()()}if(2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.initiators.controls),e.xp6(3),e.Q6J("ngIf",0===t.initiators.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,U,t.icons.add))}}function rn(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const _=e.CHM(t).index,a=e.oxw(),l=a.$implicit,d=a.index,g=e.oxw(3);return e.KtG(g.removeGroupInitiator(l,_,d))}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,U,o.icons.destroy))}}function ln(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const _=e.CHM(t).index,a=e.oxw(),l=a.$implicit,d=a.index,g=e.oxw(3);return e.KtG(g.removeGroupDisk(l,_,d))}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,U,o.icons.destroy))}}function cn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",91)(1,"div",5),e.ynx(2),e.SDv(3,132),e.BQk(),e._uU(4),e.TgZ(5,"button",93),e.NdJ("click",function(){const _=e.CHM(t).index,a=e.oxw(3);return e.KtG(a.removeGroup(_))}),e.qZA()(),e.TgZ(6,"div",7)(7,"div",8)(8,"label",133),e.SDv(9,134),e.qZA(),e.TgZ(10,"div",11),e._UZ(11,"input",135),e.qZA()(),e.TgZ(12,"div",8)(13,"label",136),e.ynx(14),e.SDv(15,137),e.BQk(),e.qZA(),e.TgZ(16,"div",11),e.YNc(17,rn,5,4,"ng-container",20),e.TgZ(18,"div",21)(19,"div",22)(20,"cd-select",23),e.NdJ("selection",function(i){const a=e.CHM(t).index,l=e.oxw(3);return 
e.KtG(l.onGroupMemberSelection(i,a))}),e._UZ(21,"i",24),e.ynx(22),e.SDv(23,138),e.BQk(),e.qZA()()(),e._UZ(24,"hr"),e.qZA()(),e.TgZ(25,"div",8)(26,"label",27),e.ynx(27),e.SDv(28,139),e.BQk(),e.qZA(),e.TgZ(29,"div",11),e.YNc(30,ln,5,4,"ng-container",20),e.TgZ(31,"div",21)(32,"div",22)(33,"cd-select",126),e._UZ(34,"i",24),e.ynx(35),e.SDv(36,140),e.BQk(),e.qZA()()(),e._UZ(37,"hr"),e.qZA()()()()}if(2&n){const t=s.$implicit,o=s.index,i=e.oxw(3);e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("group_id")," "),e.xp6(13),e.Q6J("ngForOf",t.getValue("members")),e.xp6(3),e.Q6J("data",t.getValue("members"))("options",i.groupMembersSelections[o])("messages",i.messages.groupInitiator),e.xp6(1),e.Q6J("ngClass",e.VKq(12,U,i.icons.add)),e.xp6(9),e.Q6J("ngForOf",t.getValue("disks")),e.xp6(3),e.Q6J("data",t.getValue("disks"))("options",i.groupDiskSelections[o])("messages",i.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(14,U,i.icons.add))}}function dn(n,s){1&n&&(e.TgZ(0,"span",47),e.SDv(1,141),e.qZA())}function pn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",8)(1,"label",85),e.SDv(2,129),e.qZA(),e.TgZ(3,"div",130),e.YNc(4,cn,38,16,"div",88),e.TgZ(5,"div",21)(6,"div",22),e.YNc(7,dn,2,0,"span",17),e.TgZ(8,"button",89),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addGroup(),e.KtG(!1)}),e._UZ(9,"i",24),e.ynx(10),e.SDv(11,131),e.BQk(),e.qZA()()()()()}if(2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.groups.controls),e.xp6(3),e.Q6J("ngIf",0===t.groups.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,U,t.icons.add))}}function un(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7)(9,"div",8)(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11)(13,"div",12),e._UZ(14,"input",13),e.TgZ(15,"button",14),e.NdJ("click",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.targetSettingsModal())}),e._UZ(16,"i",15),e.qZA()(),e.YNc(17,Oo,2,0,"span",16),e.YNc(18,ho,2,0,"span",16),e.YNc(19,Ao,9,0,"span",16),e.YNc(20,Po,2,0,"span",17),e._UZ(21,"hr"),e.qZA()(),e.TgZ(22,"div",8)(23,"label",18),e.SDv(24,19),e.qZA(),e.TgZ(25,"div",11),e.YNc(26,bo,5,4,"ng-container",20),e.TgZ(27,"div",21)(28,"div",22)(29,"cd-select",23),e.NdJ("selection",function(i){e.CHM(t);const _=e.oxw();return e.KtG(_.onPortalSelection(i))}),e._UZ(30,"i",24),e.ynx(31),e.SDv(32,25),e.BQk(),e.qZA()()(),e._UZ(33,"input",26),e.YNc(34,Io,2,1,"span",16),e._UZ(35,"hr"),e.qZA()(),e.TgZ(36,"div",8)(37,"label",27),e.SDv(38,28),e.qZA(),e.TgZ(39,"div",11),e.YNc(40,Lo,11,10,"ng-container",20),e._UZ(41,"input",29),e.YNc(42,vo,2,0,"span",16),e.YNc(43,$o,2,0,"span",16),e.TgZ(44,"div",21)(45,"div",22)(46,"cd-select",23),e.NdJ("selection",function(i){e.CHM(t);const _=e.oxw();return e.KtG(_.onImageSelection(i))}),e._UZ(47,"i",24),e.ynx(48),e.SDv(49,30),e.BQk(),e.qZA()()(),e._UZ(50,"hr"),e.qZA()(),e.TgZ(51,"div",8)(52,"div",31)(53,"div",32),e._UZ(54,"input",33),e.TgZ(55,"label",34),e.SDv(56,35),e.qZA()(),e._UZ(57,"hr"),e.qZA()(),e.YNc(58,Ko,39,8,"div",36),e.YNc(59,an,13,5,"div",37),e.YNc(60,pn,12,5,"div",37),e.qZA(),e.TgZ(61,"div",38)(62,"cd-form-button-panel",39),e.NdJ("submitActionEvent",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.submit())}),e.ALo(63,"titlecase"),e.ALo(64,"upperFirst"),e.qZA()()()()()}if(2&n){const 
t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.targetForm),e.xp6(6),e.pQV(e.lcZ(6,26,o.action))(e.lcZ(7,28,o.resource)),e.QtT(5),e.xp6(9),e.Q6J("ngClass",e.VKq(34,U,o.icons.deepCheck)),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"pattern")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"iqn")),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.targetForm.getValue("target_controls"))),e.xp6(6),e.Q6J("ngForOf",o.portals.value),e.xp6(3),e.Q6J("data",o.portals.value)("options",o.portalsSelections)("messages",o.messages.portals),e.xp6(1),e.Q6J("ngClass",e.VKq(36,U,o.icons.add)),e.xp6(4),e.Q6J("ngIf",o.targetForm.showError("portals",t,"minGateways")),e.xp6(6),e.Q6J("ngForOf",o.targetForm.getValue("disks")),e.xp6(2),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupLunId")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupWwn")),e.xp6(3),e.Q6J("data",o.disks.value)("options",o.imagesSelections)("messages",o.messages.images),e.xp6(1),e.Q6J("ngClass",e.VKq(38,U,o.icons.add)),e.xp6(11),e.Q6J("ngIf",o.cephIscsiConfigVersion>10&&!o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(2),e.Q6J("form",o.targetForm)("submitText",e.lcZ(63,30,o.action)+" "+e.lcZ(64,32,o.resource))}}let Ct=(()=>{class n extends k.E{constructor(t,o,i,_,a,l,d){super(),this.iscsiService=t,this.modalService=o,this.rbdService=i,this.router=_,this.route=a,this.taskWrapper=l,this.actionLabels=d,this.api_version=0,this.minimum_gateways=1,this.icons=T.P,this.isEdit=!1,this.portalsSelections=[],this.imagesInitiatorSelections=[],this.groupDiskSelections=[],this.groupMembersSelections=[],this.imagesSettings={},this.messages={portals:new N.a({noOptions:"There are no portals available."}),images:new N.a({noOptions:"There are no images available."}),initiatorImage:new N.a({noOptions:"There are no images available. Please make sure you add an image to the target."}),groupInitiator:new N.a({noOptions:"There are no initiators available. 
Please make sure you add an initiator to the target."})},this.IQN_REGEX=/^iqn\.(19|20)\d\d-(0[1-9]|1[0-2])\.\D{2,3}(\.[A-Za-z0-9-]+)+(:[A-Za-z0-9-\.]+)*$/,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.resource="target"}ngOnInit(){const t=new h.E(()=>{});t.pageInfo.limit=-1;const o=[this.iscsiService.listTargets(),this.rbdService.list(t.toParams()),this.iscsiService.portals(),this.iscsiService.settings(),this.iscsiService.version()];this.router.url.startsWith("/block/iscsi/targets/edit")&&(this.isEdit=!0,this.route.params.subscribe(i=>{this.target_iqn=decodeURIComponent(i.target_iqn),o.push(this.iscsiService.getTarget(this.target_iqn))})),this.action=this.isEdit?this.actionLabels.EDIT:this.actionLabels.CREATE,(0,ce.D)(o).subscribe(i=>{const _=C()(i[0]).filter(l=>l.target_iqn!==this.target_iqn).flatMap(l=>l.disks).map(l=>`${l.pool}/${l.image}`).value();"api_version"in i[3]&&(this.api_version=i[3].api_version),this.minimum_gateways=i[3].config.minimum_gateways,this.target_default_controls=i[3].target_default_controls,this.target_controls_limits=i[3].target_controls_limits,this.disk_default_controls=i[3].disk_default_controls,this.disk_controls_limits=i[3].disk_controls_limits,this.backstores=i[3].backstores,this.default_backstore=i[3].default_backstore,this.unsupported_rbd_features=i[3].unsupported_rbd_features,this.required_rbd_features=i[3].required_rbd_features,this.imagesAll=C()(i[1]).flatMap(l=>l.value).filter(l=>!l.namespace&&!(-1!==_.indexOf(`${l.pool_name}/${l.name}`)||0===this.getValidBackstores(l).length)).value(),this.imagesSelections=this.imagesAll.map(l=>new x.$(!1,`${l.pool_name}/${l.name}`,""));const a=[];i[2].forEach(l=>{l.ip_addresses.forEach(d=>{a.push(new x.$(!1,l.name+":"+d,""))})}),this.portalsSelections=[...a],this.cephIscsiConfigVersion=i[4].ceph_iscsi_config_version,this.createForm(),i[5]&&this.resolveModel(i[5]),this.loadingReady()})}createForm(){if(this.targetForm=new Z.d({target_iqn:new r.p4("iqn.2001-07.com.ceph:"+Date.now(),{validators:[r.kI.required,r.kI.pattern(this.IQN_REGEX)]}),target_controls:new r.p4({}),portals:new r.p4([],{validators:[z.h.custom("minGateways",t=>C().uniq(t.map(i=>i.split(":")[0])).length{const o=this.getLunIds(t);return o.length!==C().uniq(o).length}),z.h.custom("dupWwn",t=>{const o=this.getWwns(t);return o.length!==C().uniq(o).length})]}),initiators:new r.vC([]),groups:new r.vC([]),acl_enabled:new r.p4(!1)}),this.cephIscsiConfigVersion>10){const t=new Z.d({user:new r.p4(""),password:new r.p4(""),mutual_user:new r.p4(""),mutual_password:new r.p4("")});this.setAuthValidator(t),this.targetForm.addControl("auth",t)}}resolveModel(t){this.targetForm.patchValue({target_iqn:t.target_iqn,target_controls:t.target_controls,acl_enabled:t.acl_enabled}),this.cephIscsiConfigVersion>10&&this.targetForm.patchValue({auth:t.auth});const o=[];C().forEach(t.portals,_=>{o.push(`${_.host}:${_.ip}`)}),this.targetForm.patchValue({portals:o});const i=[];C().forEach(t.disks,_=>{const a=`${_.pool}/${_.image}`;i.push(a),this.imagesSettings[a]={backstore:_.backstore},this.imagesSettings[a][_.backstore]=_.controls,"lun"in _&&(this.imagesSettings[a].lun=_.lun),"wwn"in _&&(this.imagesSettings[a].wwn=_.wwn),this.onImageSelection({option:{name:a,selected:!0}})}),this.targetForm.patchValue({disks:i}),C().forEach(t.clients,_=>{const a=this.addInitiator();_.luns=C().map(_.luns,l=>`${l.pool}/${l.image}`),a.patchValue(_)}),t.groups.forEach((_,a)=>{const 
l=this.addGroup();_.disks=C().map(_.disks,d=>`${d.pool}/${d.image}`),l.patchValue(_),C().forEach(_.members,d=>{this.onGroupMemberSelection({option:new x.$(!0,d,"")},a)})})}hasAdvancedSettings(t){return Object.values(t).length>0}get portals(){return this.targetForm.get("portals")}onPortalSelection(){this.portals.setValue(this.portals.value)}removePortal(t,o){return this.portalsSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.portals.value.splice(t,1),this.portals.setValue(this.portals.value),!1}get disks(){return this.targetForm.get("disks")}removeImage(t,o){return this.imagesSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.disks.value.splice(t,1),this.removeImageRefs(o),this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1}),!1}removeImageRefs(t){this.initiators.controls.forEach(o=>{const i=o.value.luns.filter(_=>_!==t);o.get("luns").setValue(i)}),this.groups.controls.forEach(o=>{const i=o.value.disks.filter(_=>_!==t);o.get("disks").setValue(i)}),C().forEach(this.imagesInitiatorSelections,(o,i)=>{this.imagesInitiatorSelections[i]=o.filter(_=>_.name!==t)}),C().forEach(this.groupDiskSelections,(o,i)=>{this.groupDiskSelections[i]=o.filter(_=>_.name!==t)})}getDefaultBackstore(t){let o=this.default_backstore;const i=this.getImageById(t);return this.validFeatures(i,this.default_backstore)||this.backstores.forEach(_=>{_!==this.default_backstore&&this.validFeatures(i,_)&&(o=_)}),o}isLunIdInUse(t,o){const i=this.disks.value.filter(_=>_!==o);return this.getLunIds(i).includes(t)}getLunIds(t){return C().map(t,o=>this.imagesSettings[o].lun)}nextLunId(t){const o=this.disks.value.filter(a=>a!==t),i=this.getLunIds(o);let _=0;for(;i.includes(_);)_++;return _}getWwns(t){return C().map(t,i=>this.imagesSettings[i].wwn).filter(i=>C().isString(i)&&""!==i)}onImageSelection(t){const o=t.option;if(o.selected){if(this.imagesSettings[o.name])this.isLunIdInUse(this.imagesSettings[o.name].lun,o.name)&&(this.imagesSettings[o.name].lun=this.nextLunId(o.name));else{const i=this.getDefaultBackstore(o.name);this.imagesSettings[o.name]={backstore:i,lun:this.nextLunId(o.name)},this.imagesSettings[o.name][i]={}}C().forEach(this.imagesInitiatorSelections,(i,_)=>{i.push(new x.$(!1,o.name,"")),this.imagesInitiatorSelections[_]=[...i]}),C().forEach(this.groupDiskSelections,(i,_)=>{i.push(new x.$(!1,o.name,"")),this.groupDiskSelections[_]=[...i]})}else this.removeImageRefs(o.name);this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1})}get initiators(){return this.targetForm.get("initiators")}addInitiator(){const t=new Z.d({client_iqn:new r.p4("",{validators:[r.kI.required,z.h.custom("notUnique",i=>{const _=this.initiators.controls.reduce(function(a,l){return a.concat(l.value.client_iqn)},[]);return _.indexOf(i)!==_.lastIndexOf(i)}),r.kI.pattern(this.IQN_REGEX)]}),auth:new Z.d({user:new r.p4(""),password:new r.p4(""),mutual_user:new r.p4(""),mutual_password:new r.p4("")}),luns:new r.p4([]),cdIsInGroup:new r.p4(!1)});this.setAuthValidator(t),this.initiators.push(t),C().forEach(this.groupMembersSelections,(i,_)=>{i.push(new x.$(!1,"","")),this.groupMembersSelections[_]=[...i]});const o=C().map(this.targetForm.getValue("disks"),i=>new x.$(!1,i,""));return 
this.imagesInitiatorSelections.push(o),t}setAuthValidator(t){z.h.validateIf(t.get("user"),()=>t.getValue("password")||t.getValue("mutual_user")||t.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[t.get("password"),t.get("mutual_user"),t.get("mutual_password")]),z.h.validateIf(t.get("password"),()=>t.getValue("user")||t.getValue("mutual_user")||t.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("mutual_user"),t.get("mutual_password")]),z.h.validateIf(t.get("mutual_user"),()=>t.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_password")]),z.h.validateIf(t.get("mutual_password"),()=>t.getValue("mutual_user"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_user")])}removeInitiator(t){const o=this.initiators.value[t];this.initiators.removeAt(t),C().forEach(this.groupMembersSelections,(i,_)=>{i.splice(t,1),this.groupMembersSelections[_]=[...i]}),this.groups.controls.forEach(i=>{const _=i.value.members.filter(a=>a!==o.client_iqn);i.get("members").setValue(_)}),this.imagesInitiatorSelections.splice(t,1)}updatedInitiatorSelector(){this.initiators.controls.forEach(t=>{t.get("client_iqn").updateValueAndValidity({emitEvent:!1})}),C().forEach(this.groupMembersSelections,(t,o)=>{C().forEach(t,(i,_)=>{const a=i.name;i.name=this.initiators.controls[_].value.client_iqn,this.groups.controls.forEach(l=>{const d=l.value.members,g=d.indexOf(a);-1!==g&&(d[g]=i.name),l.get("members").setValue(d)})}),this.groupMembersSelections[o]=[...this.groupMembersSelections[o]]})}removeInitiatorImage(t,o,i,_){const a=t.getValue("luns");return a.splice(o,1),t.patchValue({luns:a}),this.imagesInitiatorSelections[i].forEach(l=>{l.name===_&&(l.selected=!1)}),!1}get groups(){return this.targetForm.get("groups")}addGroup(){const t=new Z.d({group_id:new r.p4("",{validators:[r.kI.required]}),members:new r.p4([]),disks:new r.p4([])});this.groups.push(t);const o=C().map(this.targetForm.getValue("disks"),_=>new x.$(!1,_,""));this.groupDiskSelections.push(o);const i=C().map(this.initiators.value,_=>new x.$(!1,_.client_iqn,"",!_.cdIsInGroup));return this.groupMembersSelections.push(i),t}removeGroup(t){this.groups.removeAt(t),this.groupMembersSelections[t].filter(i=>i.selected).forEach(i=>{i.selected=!1,this.onGroupMemberSelection({option:i},t)}),this.groupMembersSelections.splice(t,1),this.groupDiskSelections.splice(t,1)}onGroupMemberSelection(t,o){const i=t.option;let _=[];i.selected||(_=this.groupDiskSelections[o].filter(l=>l.selected).map(l=>l.name)),this.initiators.controls.forEach((a,l)=>{a.value.client_iqn===i.name&&(a.patchValue({luns:_}),a.get("cdIsInGroup").setValue(i.selected),C().forEach(this.groupMembersSelections,d=>{d[l].enabled=!i.selected}),this.imagesInitiatorSelections[l].forEach(d=>{d.selected=_.includes(d.name)}))})}removeGroupInitiator(t,o,i){const _=t.getValue("members")[o];t.getValue("members").splice(o,1),this.onGroupMemberSelection({option:new x.$(!1,_,"")},i)}removeGroupDisk(t,o,i){const _=t.getValue("disks")[o];t.getValue("disks").splice(o,1),this.groupDiskSelections[i].forEach(a=>{a.name===_&&(a.selected=!1)}),this.groupDiskSelections[i]=[...this.groupDiskSelections[i]]}submit(){const 
t=C().cloneDeep(this.targetForm.value),o={target_iqn:this.targetForm.getValue("target_iqn"),target_controls:this.targetForm.getValue("target_controls"),acl_enabled:this.targetForm.getValue("acl_enabled"),portals:[],disks:[],clients:[],groups:[]};if(this.cephIscsiConfigVersion>10){const _=this.targetForm.get("auth");_.getValue("user")||_.get("user").setValue(""),_.getValue("password")||_.get("password").setValue(""),_.getValue("mutual_user")||_.get("mutual_user").setValue(""),_.getValue("mutual_password")||_.get("mutual_password").setValue("");const a=this.targetForm.getValue("acl_enabled");o.auth={user:a?"":_.getValue("user"),password:a?"":_.getValue("password"),mutual_user:a?"":_.getValue("mutual_user"),mutual_password:a?"":_.getValue("mutual_password")}}let i;t.disks.forEach(_=>{const a=_.split("/"),l=this.imagesSettings[_].backstore;o.disks.push({pool:a[0],image:a[1],backstore:l,controls:this.imagesSettings[_][l],lun:this.imagesSettings[_].lun,wwn:this.imagesSettings[_].wwn})}),t.portals.forEach(_=>{const a=_.indexOf(":");o.portals.push({host:_.substring(0,a),ip:_.substring(a+1)})}),o.acl_enabled&&(t.initiators.forEach(_=>{_.auth.user||(_.auth.user=""),_.auth.password||(_.auth.password=""),_.auth.mutual_user||(_.auth.mutual_user=""),_.auth.mutual_password||(_.auth.mutual_password=""),delete _.cdIsInGroup;const a=[];_.luns.forEach(l=>{const d=l.split("/");a.push({pool:d[0],image:d[1]})}),_.luns=a}),o.clients=t.initiators),o.acl_enabled&&(t.groups.forEach(_=>{const a=[];_.disks.forEach(l=>{const d=l.split("/");a.push({pool:d[0],image:d[1]})}),_.disks=a}),o.groups=t.groups),this.isEdit?(o.new_target_iqn=o.target_iqn,o.target_iqn=this.target_iqn,i=this.taskWrapper.wrapTaskAroundCall({task:new E.R("iscsi/target/edit",{target_iqn:o.target_iqn}),call:this.iscsiService.updateTarget(this.target_iqn,o)})):i=this.taskWrapper.wrapTaskAroundCall({task:new E.R("iscsi/target/create",{target_iqn:o.target_iqn}),call:this.iscsiService.createTarget(o)}),i.subscribe({error:()=>{this.targetForm.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/block/iscsi/targets"])})}targetSettingsModal(){const t={target_controls:this.targetForm.get("target_controls"),target_default_controls:this.target_default_controls,target_controls_limits:this.target_controls_limits};this.modalRef=this.modalService.show(Ro,t)}imageSettingsModal(t){const o={imagesSettings:this.imagesSettings,image:t,api_version:this.api_version,disk_default_controls:this.disk_default_controls,disk_controls_limits:this.disk_controls_limits,backstores:this.getValidBackstores(this.getImageById(t)),control:this.targetForm.get("disks")};this.modalRef=this.modalService.show(Co,o)}validFeatures(t,o){const i=t.features,_=this.required_rbd_features[o];return(i&_)===_&&0==(i&this.unsupported_rbd_features[o])}getImageById(t){return this.imagesAll.find(o=>t===`${o.pool_name}/${o.name}`)}getValidBackstores(t){return this.backstores.filter(o=>this.validFeatures(t,o))}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(X),e.Y36(pe.Z),e.Y36(H),e.Y36(m.F0),e.Y36(m.gz),e.Y36(u.P),e.Y36(L.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let s,t,o,i,_,a,l,d,g,S,I,P,$,y,Q,Y,ee,te,w,_e,ae,O,me,ge,Te,fe,Ce,Se,Re,G,Ge,ye,xe,Ze,we,He,ke,Ke,qe,Xe,Qe,ze,b,xt,Zt,wt,Ht,kt,Kt,qt,Xt,Qt,zt,Jt,Yt,Vt,Ut,jt,Wt,eo,to,oo;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Target IQN",o="Portals",i="Add portal",_="Images",a="Add image",l="ACL authentication",d="This field is 
required.",g="IQN has wrong pattern.",S="An IQN has the following notation 'iqn.$year-$month.$reversedAddress:$definedName'",I="For example: iqn.2016-06.org.dashboard:storage:disk.sn-a8675309",P="More information",$="This target has modified advanced settings.",y="At least " + "\ufffd0\ufffd" + " gateways are required.",Q="Backstore: " + "\ufffd0\ufffd" + ".\xA0",Y="This image has modified settings.",ee="Duplicated LUN numbers.",te="Duplicated WWN.",w="User",_e="Password",ae="Mutual User",O="Mutual Password",me="This field is required.",ge="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Te="This field is required.",fe="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Ce="This field is required.",Se="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Re="This field is required.",G="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Ge="Initiators",ye="Add initiator",xe="Initiator",Ze="Client IQN",we="User",He="Password",ke="Mutual User",Ke="Mutual Password",qe="Images",Xe="Initiator IQN needs to be unique.",Qe="This field is required.",ze="IQN has wrong pattern.",b="This field is required.",xt="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Zt="This field is required.",wt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Ht="This field is required.",kt="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Kt="This field is required.",qt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Xt="Initiator belongs to a group. 
Images will be configure in the group.",Qt="Add image",zt="No items added.",Jt="Groups",Yt="Add group",Vt="Group",Ut="Name",jt="Initiators",Wt="Add initiator",eo="Images",to="Add image",oo="No items added.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","targetForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],s,[1,"card-body"],[1,"form-group","row"],["for","target_iqn",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],[1,"input-group"],["type","text","id","target_iqn","name","target_iqn","formControlName","target_iqn","cdTrim","",1,"form-control"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],["class","invalid-feedback",4,"ngIf"],["class","form-text text-muted",4,"ngIf"],["for","portals",1,"cd-col-form-label","required"],o,[4,"ngFor","ngForOf"],[1,"row"],[1,"col-md-12"],["elemClass","btn btn-light float-end",3,"data","options","messages","selection"],[3,"ngClass"],i,["type","hidden","id","portals","name","portals","formControlName","portals",1,"form-control"],["for","disks",1,"cd-col-form-label"],_,["type","hidden","id","disks","name","disks","formControlName","disks",1,"form-control"],a,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","formControlName","acl_enabled","name","acl_enabled","id","acl_enabled",1,"custom-control-input"],["for","acl_enabled",1,"custom-control-label"],l,["formGroupName","auth",4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,g,S,I,["target","_blank","href","https://en.wikipedia.org/wiki/ISCSI#Addressing"],P,[1,"form-text","text-muted"],$,[1,"input-group","cd-mb"],["type","text","disabled","",1,"cd-form-control",3,"value"],["type","button",1,"btn","btn-light",3,"click"],y,["class","input-group-text",4,"ngIf"],[4,"ngIf"],[1,"input-group-text"],Q,Y,ee,te,["formGroupName","auth"],["for","target_user",1,"cd-col-form-label"],w,["type","text","autocomplete","off","id","target_user","name","target_user","formControlName","user",1,"form-control"],["for","target_password",1,"cd-col-form-label"],_e,["type","password","autocomplete","new-password","id","target_password","name","target_password","formControlName","password",1,"form-control"],["type","button","cdPasswordButton","target_password",1,"btn","btn-light"],["source","target_password"],["for","target_mutual_user",1,"cd-col-form-label"],ae,["type","text","autocomplete","off","id","target_mutual_user","name","target_mutual_user","formControlName","mutual_user",1,"form-control"],["for","target_mutual_password",1,"cd-col-form-label"],O,["type","password","autocomplete","new-password","id","target_mutual_password","name","target_mutual_password","formControlName","mutual_password",1,"form-control"],["type","button","cdPasswordButton","target_mutual_password",1,"btn","btn-light"],["source","target_mutual_password"],me,ge,Te,fe,Ce,Se,Re,G,["for","initiators",1,"cd-col-form-label"],Ge,["formArrayName","initiators",1,"cd-col-form-input"],["class","card 
mb-2",3,"formGroup",4,"ngFor","ngForOf"],[1,"btn","btn-light","float-end",3,"click"],ye,[1,"card","mb-2",3,"formGroup"],xe,["type","button",1,"btn-close","float-end",3,"click"],["for","client_iqn",1,"cd-col-form-label","required"],Ze,["type","text","formControlName","client_iqn","cdTrim","",1,"form-control",3,"blur"],["for","user",1,"cd-col-form-label"],we,["formControlName","user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","password",1,"cd-col-form-label"],He,["formControlName","password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["type","button",1,"btn","btn-light",3,"cdPasswordButton"],[3,"source"],["for","mutual_user",1,"cd-col-form-label"],ke,["formControlName","mutual_user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","mutual_password",1,"cd-col-form-label"],Ke,["formControlName","mutual_password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["for","luns",1,"cd-col-form-label"],qe,["class","row",4,"ngIf"],Xe,Qe,ze,b,xt,Zt,wt,Ht,kt,Kt,qt,Xt,["elemClass","btn btn-light float-end",3,"data","options","messages"],Qt,zt,Jt,["formArrayName","groups",1,"cd-col-form-input"],Yt,Vt,["for","group_id",1,"cd-col-form-label","required"],Ut,["type","text","formControlName","group_id",1,"form-control"],["for","members",1,"cd-col-form-label"],jt,Wt,eo,to,oo]},template:function(t,o){1&t&&e.YNc(0,un,65,40,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},dependencies:[c.mk,c.sg,c.O5,r._Y,r.Fj,r.Wl,r.JJ,r.JL,r.sg,r.u,r.x0,r.CE,Eo.H,Ye.s,A.p,Tt.C,Mo,ft.y,M.o,B.b,K.P,J.V,c.rS,Je.V,ot.m],styles:[".cd-mb[_ngcontent-%COMP%]{margin-bottom:10px}"]}),n})();var St=p(68136),ue=p(30982),W=p(59019),Le=p(99466),Ee=p(68774),Rt=p(55657),de=p(38047),nt=p(18001),ve=p(97161),oe=p(47640);function mn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function gn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function Tn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,32),e.qZA())}function fn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,33),e.qZA())}function Cn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,34),e.qZA())}function Sn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,35),e.qZA())}function Rn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,36),e.qZA())}function En(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,37),e.qZA())}let Mn=(()=>{class n{constructor(t,o,i,_,a){this.authStorageService=t,this.activeModal=o,this.actionLabels=i,this.iscsiService=_,this.notificationService=a,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.permission=this.authStorageService.getPermissions().iscsi}ngOnInit(){this.hasPermission=this.permission.update,this.createForm(),this.iscsiService.getDiscovery().subscribe(t=>{this.discoveryForm.patchValue(t)})}createForm(){this.discoveryForm=new Z.d({user:new r.p4({value:"",disabled:!this.hasPermission}),password:new r.p4({value:"",disabled:!this.hasPermission}),mutual_user:new r.p4({value:"",disabled:!this.hasPermission}),mutual_password:new 
r.p4({value:"",disabled:!this.hasPermission})}),z.h.validateIf(this.discoveryForm.get("user"),()=>this.discoveryForm.getValue("password")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("password"),()=>this.discoveryForm.getValue("user")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("mutual_user"),()=>this.discoveryForm.getValue("mutual_password"),[r.kI.required],[r.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("mutual_password"),()=>this.discoveryForm.getValue("mutual_user"),[r.kI.required],[r.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user")])}submitAction(){this.iscsiService.updateDiscovery(this.discoveryForm.value).subscribe(()=>{this.notificationService.show(nt.k.success,"Updated discovery authentication"),this.activeModal.close()},()=>{this.discoveryForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(F.Kz),e.Y36(L.p4),e.Y36(X),e.Y36(ve.g))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-discovery-modal"]],decls:44,vars:13,consts:function(){let s,t,o,i,_,a,l,d,g,S,I,P,$;return s="Discovery Authentication",t="User",o="Password",i="Mutual User",_="Mutual Password",a="This field is required.",l="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",d="This field is required.",g="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",S="This field is required.",I="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",P="This field is required.",$="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or 
'/'.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","discoveryForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],t,[1,"cd-col-form-input"],["id","user","formControlName","user","type","text","autocomplete","off",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","password",1,"cd-col-form-label"],o,[1,"input-group"],["id","password","formControlName","password","type","password","autocomplete","new-password",1,"form-control"],["type","button","cdPasswordButton","password",1,"btn","btn-light"],["source","password"],["for","mutual_user",1,"cd-col-form-label"],i,["id","mutual_user","formControlName","mutual_user","type","text","autocomplete","off",1,"form-control"],["for","mutual_password",1,"cd-col-form-label"],_,["id","mutual_password","formControlName","mutual_password","type","password","autocomplete","new-password",1,"form-control"],["type","button","cdPasswordButton","mutual_password",1,"btn","btn-light"],["source","mutual_password"],[1,"modal-footer"],[3,"form","showSubmit","submitText","submitActionEvent"],[1,"invalid-feedback"],a,l,d,g,S,I,P,$]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"div",7)(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e._UZ(11,"input",11),e.YNc(12,mn,2,0,"span",12),e.YNc(13,gn,2,0,"span",12),e.qZA()(),e.TgZ(14,"div",7)(15,"label",13),e.SDv(16,14),e.qZA(),e.TgZ(17,"div",10)(18,"div",15),e._UZ(19,"input",16)(20,"button",17)(21,"cd-copy-2-clipboard-button",18),e.qZA(),e.YNc(22,Tn,2,0,"span",12),e.YNc(23,fn,2,0,"span",12),e.qZA()(),e.TgZ(24,"div",7)(25,"label",19),e.ynx(26),e.SDv(27,20),e.BQk(),e.qZA(),e.TgZ(28,"div",10),e._UZ(29,"input",21),e.YNc(30,Cn,2,0,"span",12),e.YNc(31,Sn,2,0,"span",12),e.qZA()(),e.TgZ(32,"div",7)(33,"label",22),e.SDv(34,23),e.qZA(),e.TgZ(35,"div",10)(36,"div",15),e._UZ(37,"input",24)(38,"button",25)(39,"cd-copy-2-clipboard-button",26),e.qZA(),e.YNc(40,Rn,2,0,"span",12),e.YNc(41,En,2,0,"span",12),e.qZA()()(),e.TgZ(42,"div",27)(43,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return o.submitAction()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.discoveryForm),e.xp6(8),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"pattern")),e.xp6(9),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"pattern")),e.xp6(7),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"pattern")),e.xp6(9),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"pattern")),e.xp6(2),e.Q6J("form",o.discoveryForm)("showSubmit",o.hasPermission)("submitText",o.actionLabels.SUBMIT)}},dependencies:[c.O5,r._Y,r.Fj,r.JJ,r.JL,r.sg,r.u,f.z,Ye.s,A.p,Tt.C,M.o,B.b,K.P,J.V]}),n})();var On=p(86969),it=p(34501),hn=p(30490),Me=p(94928);let Et=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-tabs"]],decls:7,vars:0,consts:function(){let s,t;return 
s="Overview",t="Targets",[[1,"nav","nav-tabs"],[1,"nav-item"],["routerLink","/block/iscsi/overview","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link"],s,["routerLink","/block/iscsi/targets","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link"],t]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0)(1,"li",1)(2,"a",2),e.SDv(3,3),e.qZA()(),e.TgZ(4,"li",1)(5,"a",4),e.SDv(6,5),e.qZA()()())},dependencies:[m.rH,m.Od]}),n})();var An=p(68962);const Pn=["highlightTpl"],bn=["detailTable"],In=["tree"],Nn=function(){return["logged_in"]},Fn=function(){return["logged_out"]},Dn=function(n,s){return{"badge-success":n,"badge-danger":s}};function Ln(n,s){if(1&n&&(e._UZ(0,"i"),e.TgZ(1,"span"),e._uU(2),e.qZA(),e._uU(3," \xa0 "),e.TgZ(4,"span",8),e._uU(5),e.qZA()),2&n){const t=s.$implicit;e.Tol(t.data.cdIcon),e.xp6(2),e.Oqu(t.data.name),e.xp6(2),e.Q6J("ngClass",e.WLB(7,Dn,e.DdM(5,Nn).includes(t.data.status),e.DdM(6,Fn).includes(t.data.status))),e.xp6(1),e.hij(" ",t.data.status," ")}}function vn(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"legend"),e._uU(2),e.qZA(),e._UZ(3,"cd-table",10,11),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.Oqu(t.title),e.xp6(1),e.Q6J("data",t.data)("columns",t.columns)("limit",0)}}function $n(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Bn(n,s){if(1&n&&(e.TgZ(0,"strong"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Gn(n,s){if(1&n&&(e.YNc(0,$n,2,1,"span",12),e.YNc(1,Bn,2,1,"strong",12)),2&n){const t=s.row;e.Q6J("ngIf",void 0===t.default||t.default===t.current),e.xp6(1),e.Q6J("ngIf",void 0!==t.default&&t.default!==t.current)}}let yn=(()=>{class n{set content(t){this.detailTable=t,t&&t.updateColumns()}constructor(t,o){this.iscsiBackstorePipe=t,this.booleanTextPipe=o,this.icons=T.P,this.metadata={},this.nodes=[],this.treeOptions={useVirtualScroll:!0,actionMapping:{mouse:{click:this.onNodeSelected.bind(this)}}}}ngOnInit(){this.columns=[{prop:"displayName",name:"Name",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"current",name:"Current",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"default",name:"Default",flexGrow:1,cellTemplate:this.highlightTpl}]}ngOnChanges(){this.selection&&(this.selectedItem=this.selection,this.generateTree()),this.data=void 0}generateTree(){const t=C().cloneDeep(this.selectedItem.target_controls);this.cephIscsiConfigVersion>10&&C().extend(t,C().cloneDeep(this.selectedItem.auth)),this.metadata={root:t};const o={target:{expanded:C().join(this.selectedItem.cdExecuting?[T.P.large,T.P.spinner,T.P.spin]:[T.P.large,T.P.bullseye]," ")},initiators:{expanded:C().join([T.P.large,T.P.user]," "),leaf:C().join([T.P.user]," ")},groups:{expanded:C().join([T.P.large,T.P.users]," "),leaf:C().join([T.P.users]," ")},disks:{expanded:C().join([T.P.large,T.P.disk]," "),leaf:C().join([T.P.disk]," ")},portals:{expanded:C().join([T.P.large,T.P.server]," "),leaf:C().join([T.P.server]," ")}},i=[];C().forEach(this.selectedItem.disks,d=>{const g="disk_"+d.pool+"_"+d.image;this.metadata[g]={controls:d.controls,backstore:d.backstore},["wwn","lun"].forEach(S=>{S in d&&(this.metadata[g][S]=d[S])}),i.push({name:`${d.pool}/${d.image}`,cdId:g,cdIcon:o.disks.leaf})});const _=[];C().forEach(this.selectedItem.portals,d=>{_.push({name:`${d.host}:${d.ip}`,cdIcon:o.portals.leaf})});const a=[];C().forEach(this.selectedItem.clients,d=>{const g=C().cloneDeep(d.auth);d.info&&(C().extend(g,d.info),delete 
g.state,C().forEach(Object.keys(d.info.state),P=>{g[P.toLowerCase()]=d.info.state[P]})),this.metadata["client_"+d.client_iqn]=g;const S=[];d.luns.forEach(P=>{S.push({name:`${P.pool}/${P.image}`,cdId:"disk_"+P.pool+"_"+P.image,cdIcon:o.disks.leaf})});let I="";d.info&&(I=Object.keys(d.info.state).includes("LOGGED_IN")?"logged_in":"logged_out"),a.push({name:d.client_iqn,status:I,cdId:"client_"+d.client_iqn,children:S,cdIcon:o.initiators.leaf})});const l=[];C().forEach(this.selectedItem.groups,d=>{const g=[];d.disks.forEach(I=>{g.push({name:`${I.pool}/${I.image}`,cdId:"disk_"+I.pool+"_"+I.image,cdIcon:o.disks.leaf})});const S=[];d.members.forEach(I=>{S.push({name:I,cdId:"client_"+I})}),l.push({name:d.group_id,cdIcon:o.groups.leaf,children:[{name:"Disks",children:g,cdIcon:o.disks.expanded},{name:"Initiators",children:S,cdIcon:o.initiators.expanded}]})}),this.nodes=[{name:this.selectedItem.target_iqn,cdId:"root",isExpanded:!0,cdIcon:o.target.expanded,children:[{name:"Disks",isExpanded:!0,children:i,cdIcon:o.disks.expanded},{name:"Portals",isExpanded:!0,children:_,cdIcon:o.portals.expanded},{name:"Initiators",isExpanded:!0,children:a,cdIcon:o.initiators.expanded},{name:"Groups",isExpanded:!0,children:l,cdIcon:o.groups.expanded}]}]}format(t){return"boolean"==typeof t?this.booleanTextPipe.transform(t):t}onNodeSelected(t,o){if(ne.iM.ACTIVATE(t,o,!0),o.data.cdId){this.title=o.data.name;const i=this.metadata[o.data.cdId]||{};"root"===o.data.cdId?(this.detailTable?.toggleColumn({prop:"default",isHidden:!0}),this.data=C().map(this.settings.target_default_controls,(_,a)=>({displayName:a,default:_=this.format(_),current:C().isUndefined(i[a])?_:this.format(i[a])})),this.cephIscsiConfigVersion>10&&["user","password","mutual_user","mutual_password"].forEach(_=>{this.data.push({displayName:_,default:null,current:i[_]})})):o.data.cdId.toString().startsWith("disk_")?(this.detailTable?.toggleColumn({prop:"default",isHidden:!0}),this.data=C().map(this.settings.disk_default_controls[i.backstore],(_,a)=>({displayName:a,default:_=this.format(_),current:C().isUndefined(i.controls[a])?_:this.format(i.controls[a])})),this.data.push({displayName:"backstore",default:this.iscsiBackstorePipe.transform(this.settings.default_backstore),current:this.iscsiBackstorePipe.transform(i.backstore)}),["wwn","lun"].forEach(_=>{_ in i&&this.data.push({displayName:_,default:void 0,current:i[_]})})):(this.detailTable?.toggleColumn({prop:"default",isHidden:!1}),this.data=C().map(i,(_,a)=>({displayName:a,default:void 0,current:this.format(_)})))}else this.data=void 0;this.detailTable?.updateColumns()}onUpdateData(){this.tree.treeModel.expandAll()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(Je.V),e.Y36(An.T))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Pn,7),e.Gf(bn,5),e.Gf(In,5)),2&t){let i;e.iGM(i=e.CRH())&&(o.highlightTpl=i.first),e.iGM(i=e.CRH())&&(o.content=i.first),e.iGM(i=e.CRH())&&(o.tree=i.first)}},inputs:{selection:"selection",settings:"settings",cephIscsiConfigVersion:"cephIscsiConfigVersion"},features:[e.TTD],decls:11,vars:3,consts:function(){let s;return s="iSCSI Topology",[[1,"row"],[1,"col-6"],s,[3,"nodes","options","updateData"],["tree",""],["treeNodeTemplate",""],["class","col-6 
metadata",4,"ngIf"],["highlightTpl",""],[1,"badge",3,"ngClass"],[1,"col-6","metadata"],["columnMode","flex",3,"data","columns","limit"],["detailTable",""],[4,"ngIf"]]},template:function(t,o){1&t&&(e.TgZ(0,"div",0)(1,"div",1)(2,"legend"),e.SDv(3,2),e.qZA(),e.TgZ(4,"tree-root",3,4),e.NdJ("updateData",function(){return o.onUpdateData()}),e.YNc(6,Ln,6,10,"ng-template",null,5,e.W1O),e.qZA()(),e.YNc(8,vn,5,4,"div",6),e.qZA(),e.YNc(9,Gn,2,2,"ng-template",null,7,e.W1O)),2&t&&(e.xp6(4),e.Q6J("nodes",o.nodes)("options",o.treeOptions),e.xp6(4),e.Q6J("ngIf",o.data))},dependencies:[c.mk,c.O5,W.a,ne.qr]}),n})();function xn(n,s){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"span"),e.SDv(3,6),e.qZA(),e.TgZ(4,"pre"),e._uU(5),e.qZA(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(5),e.Oqu(t.status)}}function Zn(n,s){if(1&n&&(e.TgZ(0,"cd-alert-panel",2),e.ynx(1),e.tHW(2,3),e._UZ(3,"cd-doc",4),e.N_p(),e.BQk(),e.YNc(4,xn,6,1,"ng-container",5),e.qZA()),2&n){const t=e.oxw();e.xp6(4),e.Q6J("ngIf",t.status)}}function wn(n,s){if(1&n&&e._UZ(0,"cd-iscsi-target-details",15),2&n){const t=e.oxw(2);e.Q6J("cephIscsiConfigVersion",t.cephIscsiConfigVersion)("selection",t.expandedRow)("settings",t.settings)}}const Hn=function(n){return[n]};function kn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",7,8),e.NdJ("fetchData",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.getTargets())})("setExpandedRow",function(i){e.CHM(t);const _=e.oxw();return e.KtG(_.setExpandedRow(i))})("updateSelection",function(i){e.CHM(t);const _=e.oxw();return e.KtG(_.updateSelection(i))}),e.TgZ(2,"div",9),e._UZ(3,"cd-table-actions",10),e.TgZ(4,"button",11),e.NdJ("click",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.configureDiscoveryAuth())}),e._UZ(5,"i",12),e.ynx(6),e.SDv(7,13),e.BQk(),e.qZA()(),e.YNc(8,wn,1,3,"cd-iscsi-target-details",14),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.targets)("columns",t.columns)("hasDetails",!0)("autoReload",!1)("status",t.tableStatus),e.xp6(3),e.Q6J("permission",t.permission)("selection",t.selection)("tableActions",t.tableActions),e.xp6(2),e.Q6J("ngClass",e.VKq(10,Hn,t.icons.key)),e.xp6(3),e.Q6J("ngIf",t.expandedRow)}}let Kn=(()=>{class n extends St.o{constructor(t,o,i,_,a,l,d,g,S){super(S),this.authStorageService=t,this.iscsiService=o,this.joinPipe=i,this.taskListService=_,this.notAvailablePipe=a,this.modalService=l,this.taskWrapper=d,this.actionLabels=g,this.ngZone=S,this.available=void 0,this.selection=new Ee.r,this.targets=[],this.icons=T.P,this.builders={"iscsi/target/create":I=>({target_iqn:I.target_iqn})},this.permission=this.authStorageService.getPermissions().iscsi,this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>"/block/iscsi/targets/create",name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>`/block/iscsi/targets/edit/${this.selection.first().target_iqn}`,name:this.actionLabels.EDIT,disable:()=>this.getEditDisableDesc()},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteIscsiTargetModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Target",prop:"target_iqn",flexGrow:2,cellTransformation:Le.e.executing},{name:"Portals",prop:"cdPortals",pipe:this.joinPipe,flexGrow:2},{name:"Images",prop:"cdImages",pipe:this.joinPipe,flexGrow:2},{name:"# 
Sessions",prop:"info.num_sessions",pipe:this.notAvailablePipe,flexGrow:1}],this.iscsiService.status().subscribe(t=>{this.available=t.available,t.available||(this.status=t.message)})}getTargets(){this.available&&(this.setTableRefreshTimeout(),this.iscsiService.version().subscribe(t=>{this.cephIscsiConfigVersion=t.ceph_iscsi_config_version}),this.taskListService.init(()=>this.iscsiService.listTargets(),t=>this.prepareResponse(t),t=>this.targets=t,()=>this.onFetchError(),this.taskFilter,this.itemFilter,this.builders),this.iscsiService.settings().subscribe(t=>{this.settings=t}))}ngOnDestroy(){this.summaryDataSubscription&&this.summaryDataSubscription.unsubscribe()}getEditDisableDesc(){const t=this.selection.first();return t&&t?.cdExecuting?t.cdExecuting:t&&C().isUndefined(t?.info)?"Unavailable gateway(s)":!t}getDeleteDisableDesc(){const t=this.selection.first();return t?.cdExecuting?t.cdExecuting:t&&C().isUndefined(t?.info)?"Unavailable gateway(s)":t&&t?.info?.num_sessions?"Target has active sessions":!t}prepareResponse(t){return t.forEach(o=>{o.cdPortals=o.portals.map(i=>`${i.host}:${i.ip}`),o.cdImages=o.disks.map(i=>`${i.pool}/${i.image}`)}),t}onFetchError(){this.table.reset()}itemFilter(t,o){return t.target_iqn===o.metadata.target_iqn}taskFilter(t){return["iscsi/target/create","iscsi/target/edit","iscsi/target/delete"].includes(t.name)}updateSelection(t){this.selection=t}deleteIscsiTargetModal(){const t=this.selection.first().target_iqn;this.modalRef=this.modalService.show(ue.M,{itemDescription:"iSCSI target",itemNames:[t],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new E.R("iscsi/target/delete",{target_iqn:t}),call:this.iscsiService.deleteTarget(t)})})}configureDiscoveryAuth(){this.modalService.show(Mn)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(X),e.Y36(On.A),e.Y36(de.j),e.Y36(Rt.g),e.Y36(pe.Z),e.Y36(u.P),e.Y36(L.p4),e.Y36(e.R0b))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-list"]],viewQuery:function(t,o){if(1&t&&e.Gf(W.a,5),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first)}},features:[e._Bn([de.j]),e.qOj],decls:3,vars:2,consts:function(){let s,t,o,i;return s="iSCSI Targets not available",t="Please consult the " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + " on how to configure and enable the iSCSI Targets management functionality.",o="Available information:",i="Discovery authentication",[["type","info","title",s,4,"ngIf"],["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection",4,"ngIf"],["type","info","title",s],t,["section","iscsi"],[4,"ngIf"],o,["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],i,["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings",4,"ngIf"],["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.YNc(1,Zn,5,1,"cd-alert-panel",0),e.YNc(2,kn,9,12,"cd-table",1)),2&t&&(e.xp6(1),e.Q6J("ngIf",!1===o.available),e.xp6(1),e.Q6J("ngIf",!0===o.available))},dependencies:[c.mk,c.O5,it.G,hn.K,W.a,Me.K,M.o,Et,yn]}),n})();var st=p(66369),qn=p(76446),Xn=p(90068);const 
Qn=["iscsiSparklineTpl"],zn=["iscsiPerSecondTpl"],Jn=["iscsiRelativeDateTpl"];function Yn(n,s){if(1&n&&(e.TgZ(0,"span"),e._UZ(1,"cd-sparkline",9),e.qZA()),2&n){const t=e.oxw(),o=t.value,i=t.row;e.xp6(1),e.Q6J("data",o)("isBinary",i.cdIsBinary)}}function Vn(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function Un(n,s){if(1&n&&(e.YNc(0,Yn,2,2,"span",7),e.YNc(1,Vn,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function jn(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",t," /s ")}}function Wn(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function ei(n,s){if(1&n&&(e.YNc(0,jn,2,1,"span",7),e.YNc(1,Wn,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function ti(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"notAvailable"),e.ALo(3,"relativeDate"),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",e.lcZ(2,1,e.lcZ(3,3,t))," ")}}function oi(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function ni(n,s){if(1&n&&(e.YNc(0,ti,4,5,"span",7),e.YNc(1,oi,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}let ii=(()=>{class n{constructor(t,o,i){this.iscsiService=t,this.dimlessPipe=o,this.iscsiBackstorePipe=i,this.gateways=[],this.images=[]}ngOnInit(){this.gatewaysColumns=[{name:"Name",prop:"name"},{name:"State",prop:"state",flexGrow:1,cellTransformation:Le.e.badge,customTemplateConfig:{map:{up:{class:"badge-success"},down:{class:"badge-danger"}}}},{name:"# Targets",prop:"num_targets"},{name:"# Sessions",prop:"num_sessions"}],this.imagesColumns=[{name:"Pool",prop:"pool"},{name:"Image",prop:"image"},{name:"Backstore",prop:"backstore",pipe:this.iscsiBackstorePipe},{name:"Read Bytes",prop:"stats_history.rd_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Write Bytes",prop:"stats_history.wr_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Read Ops",prop:"stats.rd",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"Write Ops",prop:"stats.wr",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"A/O Since",prop:"optimized_since",cellTemplate:this.iscsiRelativeDateTpl}]}refresh(){this.iscsiService.overview().subscribe(t=>{this.gateways=t.gateways,this.images=t.images,this.images.map(o=>(o.stats_history&&(o.stats_history.rd_bytes=o.stats_history.rd_bytes.map(i=>i[1]),o.stats_history.wr_bytes=o.stats_history.wr_bytes.map(i=>i[1])),o.cdIsBinary=!0,o))})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(X),e.Y36(st.n),e.Y36(Je.V))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Qn,7),e.Gf(zn,7),e.Gf(Jn,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.iscsiSparklineTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiPerSecondTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiRelativeDateTpl=i.first)}},decls:15,vars:4,consts:function(){let s,t;return s="Gateways",t="Images",[s,[3,"data","columns","fetchData"],t,[3,"data","columns"],["iscsiSparklineTpl",""],["iscsiPerSecondTpl",""],["iscsiRelativeDateTpl",""],[4,"ngIf"],["class","text-muted",4,"ngIf"],[3,"data","isBinary"],[1,"text-muted"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.TgZ(1,"legend"),e.SDv(2,0),e.qZA(),e.TgZ(3,"div")(4,"cd-table",1),e.NdJ("fetchData",function(){return 
o.refresh()}),e.qZA()(),e.TgZ(5,"legend"),e.SDv(6,2),e.qZA(),e.TgZ(7,"div"),e._UZ(8,"cd-table",3),e.qZA(),e.YNc(9,Un,2,2,"ng-template",null,4,e.W1O),e.YNc(11,ei,2,2,"ng-template",null,5,e.W1O),e.YNc(13,ni,2,2,"ng-template",null,6,e.W1O)),2&t&&(e.xp6(4),e.Q6J("data",o.gateways)("columns",o.gatewaysColumns),e.xp6(4),e.Q6J("data",o.images)("columns",o.imagesColumns))},dependencies:[c.O5,qn.l,W.a,Et,Xn.h,Rt.g]}),n})(),si=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[c.ez,Pe.m,F.Oz,m.Bz,r.u5,r.UX,F.ZQ,F.HK]}),n})();var _i=p(13464),ai=p(26215),ri=p(45435),Mt=p(36848);let q=class{constructor(s,t){this.http=s,this.timerService=t,this.REFRESH_INTERVAL=3e4,this.summaryDataSource=new ai.X(null),this.summaryData$=this.summaryDataSource.asObservable()}startPolling(){return this.timerService.get(()=>this.retrieveSummaryObservable(),this.REFRESH_INTERVAL).subscribe(this.retrieveSummaryObserver())}refresh(){return this.retrieveSummaryObservable().subscribe(this.retrieveSummaryObserver())}retrieveSummaryObservable(){return this.http.get("api/block/mirroring/summary")}retrieveSummaryObserver(){return s=>{this.summaryDataSource.next(s)}}subscribeSummary(s,t){return this.summaryData$.pipe((0,ri.h)(o=>!!o)).subscribe(s,t)}getPool(s){return this.http.get(`api/block/mirroring/pool/${s}`)}updatePool(s,t){return this.http.put(`api/block/mirroring/pool/${s}`,t,{observe:"response"})}getSiteName(){return this.http.get("api/block/mirroring/site_name")}setSiteName(s){return this.http.put("api/block/mirroring/site_name",{site_name:s},{observe:"response"})}createBootstrapToken(s){return this.http.post(`api/block/mirroring/pool/${s}/bootstrap/token`,{})}importBootstrapToken(s,t,o){return this.http.post(`api/block/mirroring/pool/${s}/bootstrap/peer`,{direction:t,token:o},{observe:"response"})}getPeer(s,t){return this.http.get(`api/block/mirroring/pool/${s}/peer/${t}`)}getPeerForPool(s){return this.http.get(`api/block/mirroring/pool/${s}/peer`)}addPeer(s,t){return this.http.post(`api/block/mirroring/pool/${s}/peer`,t,{observe:"response"})}updatePeer(s,t,o){return this.http.put(`api/block/mirroring/pool/${s}/peer/${t}`,o,{observe:"response"})}deletePeer(s,t){return this.http.delete(`api/block/mirroring/pool/${s}/peer/${t}`,{observe:"response"})}};q.\u0275fac=function(s){return new(s||q)(e.LFG(ie.eN),e.LFG(Mt.f))},q.\u0275prov=e.Yz7({token:q,factory:q.\u0275fac,providedIn:"root"}),(0,D.gn)([(0,D.fM)(0,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[String]),(0,D.w6)("design:returntype",void 0)],q.prototype,"setSiteName",null),(0,D.gn)([(0,D.fM)(1,V.G),(0,D.fM)(2,V.G),(0,D.w6)("design:type",Function),(0,D.w6)("design:paramtypes",[String,String,String]),(0,D.w6)("design:returntype",void 0)],q.prototype,"importBootstrapToken",null),q=(0,D.gn)([V.o,(0,D.w6)("design:paramtypes",[ie.eN,Mt.f])],q);var _t=p(6481),li=p(68307),Ot=p(12627),ci=p(39749),di=p(13472),Oe=p(82945);function pi(n,s){1&n&&(e.TgZ(0,"span",25),e.SDv(1,26),e.qZA())}function ui(n,s){if(1&n&&(e.TgZ(0,"div",27),e._UZ(1,"input",28),e.TgZ(2,"label",29),e._uU(3),e.qZA()()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function mi(n,s){1&n&&(e.TgZ(0,"span",25),e.SDv(1,30),e.qZA())}let gi=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.rbdMirroringService=o,this.taskWrapper=i,this.pools=[],this.createForm()}createForm(){this.createBootstrapForm=new 
Z.d({siteName:new r.p4("",{validators:[r.kI.required]}),pools:new r.nJ({},{validators:[this.validatePools()]}),token:new r.p4("",{})})}ngOnInit(){this.createBootstrapForm.get("siteName").setValue(this.siteName),this.rbdMirroringService.getSiteName().subscribe(t=>{this.createBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((_,a)=>(_.push({name:a.name,mirror_mode:a.mirror_mode}),_),[]);const i=this.createBootstrapForm.get("pools");C().each(this.pools,_=>{const a=_.name,l="disabled"===_.mirror_mode,d=i.controls[a];d?l&&d.disabled?d.enable():!l&&d.enabled&&(d.disable(),d.setValue(!0)):i.addControl(a,new r.p4({value:!l,disabled:!l}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return C().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}generate(){this.createBootstrapForm.get("token").setValue("");let t="";const o=[],i=this.createBootstrapForm.get("pools");C().each(i.controls,(g,S)=>{!0===g.value&&(t=S,g.disabled||o.push(S))});const _={mirror_mode:"image"},a=(0,_t.z)(this.rbdMirroringService.setSiteName(this.createBootstrapForm.getValue("siteName")),(0,ce.D)(o.map(g=>this.rbdMirroringService.updatePool(g,_))),this.rbdMirroringService.createBootstrapToken(t).pipe((0,li.b)(g=>this.createBootstrapForm.get("token").setValue(g.token)))).pipe((0,Ot.Z)()),l=()=>{this.rbdMirroringService.refresh(),this.createBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/mirroring/bootstrap/create",{}),call:a}).subscribe({error:l,complete:l})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(q),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-create-modal"]],decls:32,vars:6,consts:function(){let s,t,o,i,_,a,l,d,g,S,I;return s="Create Bootstrap Token",t="To create a bootstrap token which can be imported by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, and click\xA0 " + "\ufffd#10\ufffd" + "Generate" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",_="Pools",a="Generate",l="Token",d="Generated token...",g="Close",S="This field is required.",I="At least one pool is required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","createBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],_,["class","custom-control 
custom-checkbox",4,"ngFor","ngForOf"],[1,"mb-4","float-end",3,"form","submitAction"],a,["for","token",1,"col-form-label"],l,["placeholder",d,"id","token","formControlName","token","readonly","",1,"form-control","resize-vertical"],["source","token",1,"float-end"],[1,"modal-footer"],["name",g,3,"backAction"],[1,"invalid-feedback"],S,[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],I]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,pi,2,0,"span",12),e.qZA(),e.TgZ(16,"div",13)(17,"label",14),e.SDv(18,15),e.qZA(),e.YNc(19,ui,4,5,"div",16),e.YNc(20,mi,2,0,"span",12),e.qZA(),e.TgZ(21,"cd-submit-button",17),e.NdJ("submitAction",function(){return o.generate()}),e.SDv(22,18),e.qZA(),e.TgZ(23,"div",8)(24,"label",19)(25,"span"),e.SDv(26,20),e.qZA()(),e.TgZ(27,"textarea",21),e._uU(28," "),e.qZA()(),e._UZ(29,"cd-copy-2-clipboard-button",22),e.qZA(),e.TgZ(30,"div",23)(31,"cd-back-button",24),e.NdJ("backAction",function(){return o.activeModal.close()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.createBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.createBootstrapForm.showError("siteName",i,"required")),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.createBootstrapForm.showError("pools",i,"requirePool")),e.xp6(1),e.Q6J("form",o.createBootstrapForm)}},dependencies:[c.sg,c.O5,ci.w,di.W,f.z,Ye.s,Oe.U,M.o,B.b,K.P,J.V,r._Y,r.Fj,r.Wl,r.JJ,r.JL,r.sg,r.u,r.x0],styles:[".form-group.ng-invalid[_ngcontent-%COMP%] .invalid-feedback[_ngcontent-%COMP%]{display:block}"]}),n})();function Ti(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,27),e.qZA())}function fi(n,s){if(1&n&&(e.TgZ(0,"option",28),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.key),e.xp6(1),e.Oqu(t.desc)}}function Ci(n,s){if(1&n&&(e.TgZ(0,"div",29),e._UZ(1,"input",30),e.TgZ(2,"label",31),e._uU(3),e.qZA()()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function Si(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,32),e.qZA())}function Ri(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,33),e.qZA())}function Ei(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,34),e.qZA())}let Mi=(()=>{class n{constructor(t,o,i,_){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.pools=[],this.directions=[{key:"rx-tx",desc:"Bidirectional"},{key:"rx",desc:"Unidirectional (receive-only)"}],this.createForm()}createForm(){this.importBootstrapForm=new Z.d({siteName:new r.p4("",{validators:[r.kI.required]}),direction:new r.p4("rx-tx",{}),pools:new r.nJ({},{validators:[this.validatePools()]}),token:new r.p4("",{validators:[r.kI.required,this.validateToken()]})})}ngOnInit(){this.rbdMirroringService.getSiteName().subscribe(t=>{this.importBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((_,a)=>(_.push({name:a.name,mirror_mode:a.mirror_mode}),_),[]);const i=this.importBootstrapForm.get("pools");C().each(this.pools,_=>{const a=_.name,l="disabled"===_.mirror_mode,d=i.controls[a];d?l&&d.disabled?d.enable():!l&&d.enabled&&(d.disable(),d.setValue(!0)):i.addControl(a,new 
r.p4({value:!l,disabled:!l}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return C().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}validateToken(){return t=>{try{if(JSON.parse(atob(t.value)))return null}catch{}return{invalidToken:!0}}}import(){const t=[],o=[],i=this.importBootstrapForm.get("pools");C().each(i.controls,(g,S)=>{!0===g.value&&(t.push(S),g.disabled||o.push(S))});const _={mirror_mode:"image"};let a=(0,_t.z)(this.rbdMirroringService.setSiteName(this.importBootstrapForm.getValue("siteName")),(0,ce.D)(o.map(g=>this.rbdMirroringService.updatePool(g,_))));a=t.reduce((g,S)=>(0,_t.z)(g,this.rbdMirroringService.importBootstrapToken(S,this.importBootstrapForm.getValue("direction"),this.importBootstrapForm.getValue("token"))),a).pipe((0,Ot.Z)());const l=()=>{this.rbdMirroringService.refresh(),this.importBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/mirroring/bootstrap/import",{}),call:a}).subscribe({error:l,complete:()=>{l(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(L.p4),e.Y36(q),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-import-modal"]],decls:36,vars:10,consts:function(){let s,t,o,i,_,a,l,d,g,S,I,P;return s="Import Bootstrap Token",t="To import a bootstrap token which was created by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, provide the generated token, and click\xA0" + "\ufffd#10\ufffd" + "Import" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",_="Direction",a="Pools",l="Token",d="Generated token...",g="This field is required.",S="At least one pool is required.",I="This field is required.",P="The token is invalid.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","importBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","direction",1,"col-form-label"],_,["id","direction","name","direction","formControlName","direction",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],a,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],["for","token",1,"col-form-label","required"],l,["placeholder",d,"id","token","formControlName","token",1,"form-control","resize-vertical"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],g,[3,"value"],[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],S,I,P]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,Ti,2,0,"span",12),e.qZA(),e.TgZ(16,"div",8)(17,"label",13)(18,"span"),e.SDv(19,14),e.qZA()(),e.TgZ(20,"select",15),e.YNc(21,fi,2,2,"option",16),e.qZA()(),e.TgZ(22,"div",17)(23,"label",18),e.SDv(24,19),e.qZA(),e.YNc(25,Ci,4,5,"div",20),e.YNc(26,Si,2,0,"span",12),e.qZA(),e.TgZ(27,"div",8)(28,"label",21),e.SDv(29,22),e.qZA(),e.TgZ(30,"textarea",23),e._uU(31," 
"),e.qZA(),e.YNc(32,Ri,2,0,"span",12),e.YNc(33,Ei,2,0,"span",12),e.qZA()(),e.TgZ(34,"div",24)(35,"cd-form-button-panel",25),e.NdJ("submitActionEvent",function(){return o.import()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.importBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.importBootstrapForm.showError("siteName",i,"required")),e.xp6(6),e.Q6J("ngForOf",o.directions),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("pools",i,"requirePool")),e.xp6(6),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"required")),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"invalidToken")),e.xp6(2),e.Q6J("form",o.importBootstrapForm)("submitText",o.actionLabels.SUBMIT)}},dependencies:[c.sg,c.O5,f.z,A.p,Oe.U,M.o,B.b,K.P,J.V,r._Y,r.YN,r.Kr,r.Fj,r.Wl,r.EJ,r.JJ,r.JL,r.sg,r.u,r.x0]}),n})();var se=p(69158),Oi=p(58111);let at=(()=>{class n{transform(t){return"warning"===t?"badge badge-warning":"error"===t?"badge badge-danger":"success"===t?"badge badge-success":"badge badge-info"}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275pipe=e.Yjl({name:"mirrorHealthColor",type:n,pure:!0}),n})();const hi=["healthTmpl"];function Ai(n,s){if(1&n&&(e.TgZ(0,"span",2),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.health_color)),e.xp6(2),e.Oqu(o)}}let Pi=(()=>{class n{constructor(t,o){this.rbdMirroringService=t,this.cephShortVersionPipe=o,this.tableStatus=new se.E}ngOnInit(){this.columns=[{prop:"instance_id",name:"Instance",flexGrow:2},{prop:"id",name:"ID",flexGrow:2},{prop:"server_hostname",name:"Hostname",flexGrow:2},{prop:"version",name:"Version",pipe:this.cephShortVersionPipe,flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.daemons,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(q),e.Y36(Oi.F))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-daemons"]],viewQuery:function(t,o){if(1&t&&e.Gf(hi,7),2&t){let i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first)}},decls:3,vars:4,consts:[["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],["healthTmpl",""],[3,"ngClass"]],template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA(),e.YNc(1,Ai,3,4,"ng-template",null,1,e.W1O)),2&t&&e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus)},dependencies:[c.mk,W.a,at]}),n})();var ht=p(59376);const bi=["stateTmpl"],Ii=["syncTmpl"],Ni=["progressTmpl"],Fi=["entriesBehindPrimaryTpl"];function Di(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",14),e.NdJ("fetchData",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.refresh())}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_error.data)("columns",t.image_error.columns)("autoReload",-1)("status",t.tableStatus)}}function Li(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",14),e.NdJ("fetchData",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.refresh())}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_syncing.data)("columns",t.image_syncing.columns)("autoReload",-1)("status",t.tableStatus)}}function vi(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",14),e.NdJ("fetchData",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.refresh())}),e.qZA()}if(2&n){const 
t=e.oxw();e.Q6J("data",t.image_ready.data)("columns",t.image_ready.columns)("autoReload",-1)("status",t.tableStatus)}}function $i(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.state_color)),e.xp6(2),e.Oqu(o)}}function Bi(n,s){1&n&&e._UZ(0,"div")}function Gi(n,s){if(1&n&&e._UZ(0,"ngb-progressbar",19),2&n){const t=e.oxw().value;e.Q6J("value",t)("showValue",!0)}}function yi(n,s){if(1&n&&(e.YNc(0,Bi,1,0,"div",16),e.TgZ(1,"div",17),e.YNc(2,Gi,1,2,"ngb-progressbar",18),e.qZA()),2&n){const t=s.row;e.Q6J("ngIf","Replaying"===t.state),e.xp6(2),e.Q6J("ngIf","Replaying"===t.state)}}function xi(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",t," ")}}function Zi(n,s){1&n&&(e.TgZ(0,"span",21),e._uU(1,"-"),e.qZA())}function wi(n,s){if(1&n&&(e.YNc(0,xi,2,1,"span",16),e.YNc(1,Zi,2,0,"span",20)),2&n){const t=s.row;e.Q6J("ngIf","journal"===t.mirror_mode),e.xp6(1),e.Q6J("ngIf","snapshot"===t.mirror_mode)}}let Hi=(()=>{class n{constructor(t){this.rbdMirroringService=t,this.image_error={data:[],columns:{}},this.image_syncing={data:[],columns:{}},this.image_ready={data:[],columns:{}},this.tableStatus=new se.E}ngOnInit(){this.image_error.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"description",name:"Issue",flexGrow:4}],this.image_syncing.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"syncing_percent",name:"Progress",cellTemplate:this.progressTmpl,flexGrow:2},{prop:"bytes_per_second",name:"Bytes per second",flexGrow:2},{prop:"entries_behind_primary",name:"Entries behind primary",cellTemplate:this.entriesBehindPrimaryTpl,flexGrow:2}],this.image_ready.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"description",name:"Description",flexGrow:4}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.image_error.data=t.content_data.image_error,this.image_syncing.data=t.content_data.image_syncing,this.image_ready.data=t.content_data.image_ready,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-images"]],viewQuery:function(t,o){if(1&t&&(e.Gf(bi,7),e.Gf(Ii,7),e.Gf(Ni,7),e.Gf(Fi,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.stateTmpl=i.first),e.iGM(i=e.CRH())&&(o.syncTmpl=i.first),e.iGM(i=e.CRH())&&(o.progressTmpl=i.first),e.iGM(i=e.CRH())&&(o.entriesBehindPrimaryTpl=i.first)}},decls:21,vars:4,consts:function(){let s,t,o;return s="Issues (" + "\ufffd0\ufffd" + ")",t="Syncing (" + "\ufffd0\ufffd" + ")",o="Ready (" + "\ufffd0\ufffd" + 
")",[["ngbNav","","cdStatefulTab","image-list",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","issues"],["ngbNavLink",""],s,["ngbNavContent",""],["ngbNavItem","syncing"],t,["ngbNavItem","ready"],o,[3,"ngbNavOutlet"],["stateTmpl",""],["progressTmpl",""],["entriesBehindPrimaryTpl",""],["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],[3,"ngClass"],[4,"ngIf"],[1,"w-100","h-100","d-flex","justify-content-center","align-items-center"],["type","info","class","w-100",3,"value","showValue",4,"ngIf"],["type","info",1,"w-100",3,"value","showValue"],["ngbTooltip","Not available with mirroring snapshot mode",4,"ngIf"],["ngbTooltip","Not available with mirroring snapshot mode"]]},template:function(t,o){if(1&t&&(e.TgZ(0,"nav",0,1),e.ynx(2,2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,Di,1,4,"ng-template",5),e.BQk(),e.ynx(6,6),e.TgZ(7,"a",3),e.SDv(8,7),e.qZA(),e.YNc(9,Li,1,4,"ng-template",5),e.BQk(),e.ynx(10,8),e.TgZ(11,"a",3),e.SDv(12,9),e.qZA(),e.YNc(13,vi,1,4,"ng-template",5),e.BQk(),e.qZA(),e._UZ(14,"div",10),e.YNc(15,$i,3,4,"ng-template",null,11,e.W1O),e.YNc(17,yi,3,2,"ng-template",null,12,e.W1O),e.YNc(19,wi,2,2,"ng-template",null,13,e.W1O)),2&t){const i=e.MAs(1);e.xp6(4),e.pQV(o.image_error.data.length),e.QtT(4),e.xp6(4),e.pQV(o.image_syncing.data.length),e.QtT(8),e.xp6(4),e.pQV(o.image_ready.data.length),e.QtT(12),e.xp6(2),e.Q6J("ngbNavOutlet",i)}},dependencies:[c.mk,c.O5,W.a,ht.m,F.uN,F.Pz,F.nv,F.Vx,F.tO,F.Dy,F.Ly,F._L,at]}),n})();var At=p(70882);class ki{}function Ki(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function qi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,26),e.qZA())}function Xi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,27),e.qZA())}function Qi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,28),e.qZA())}function zi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,29),e.qZA())}function Ji(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,30),e.qZA())}let Yi=(()=>{class n{constructor(t,o,i,_){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.bsConfig={containerClass:"theme-default"},this.createForm()}createForm(){this.editPeerForm=new Z.d({clusterName:new r.p4("",{validators:[r.kI.required,this.validateClusterName]}),clientID:new r.p4("",{validators:[r.kI.required,this.validateClientID]}),monAddr:new r.p4("",{validators:[this.validateMonAddr]}),key:new r.p4("",{validators:[this.validateKey]})})}ngOnInit(){this.pattern=`${this.poolName}/${this.peerUUID}`,"edit"===this.mode&&this.rbdMirroringService.getPeer(this.poolName,this.peerUUID).subscribe(t=>{this.setResponse(t)})}validateClusterName(t){if(!t.value.match(/^[\w\-_]*$/))return{invalidClusterName:{value:t.value}}}validateClientID(t){if(!t.value.match(/^(?!client\.)[\w\-_.]*$/))return{invalidClientID:{value:t.value}}}validateMonAddr(t){if(!t.value.match(/^[,; ]*([\w.\-_\[\]]+(:[\d]+)?[,; ]*)*$/))return{invalidMonAddr:{value:t.value}}}validateKey(t){try{if(""===t.value||atob(t.value))return null}catch{}return{invalidKey:{value:t.value}}}setResponse(t){this.response=t,this.editPeerForm.get("clusterName").setValue(t.cluster_name),this.editPeerForm.get("clientID").setValue(t.client_id),this.editPeerForm.get("monAddr").setValue(t.mon_host),this.editPeerForm.get("key").setValue(t.key)}update(){const t=new ki;let o;t.cluster_name=this.editPeerForm.getValue("clusterName"),t.client_id=this.editPeerForm.getValue("clientID"),t.mon_host=this.editPeerForm.getValue("monAddr"),t.key=this.editPeerForm.getValue("key"),o=this.taskWrapper.wrapTaskAroundCall("edit"===this.mode?{task:new 
E.R("rbd/mirroring/peer/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePeer(this.poolName,this.peerUUID,t)}:{task:new E.R("rbd/mirroring/peer/add",{pool_name:this.poolName}),call:this.rbdMirroringService.addPeer(this.poolName,t)}),o.subscribe({error:()=>this.editPeerForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(L.p4),e.Y36(q),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-peer-modal"]],decls:38,vars:13,consts:function(){let s,t,o,i,_,a,l,d,g,S,I,P,$,y,Q,Y,ee,te;return s="{VAR_SELECT, select, edit {Edit} other {Add}}",s=e.Zx4(s,{VAR_SELECT:"\ufffd0\ufffd"}),t="" + s + " pool mirror peer",o="{VAR_SELECT, select, edit {Edit} other {Add}}",o=e.Zx4(o,{VAR_SELECT:"\ufffd0\ufffd"}),i="" + o + " the pool mirror peer attributes for pool " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd1\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " and click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Submit" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",i=e.Zx4(i),_="Cluster Name",a="Name...",l="CephX ID",d="CephX ID...",g="Monitor Addresses",S="Comma-delimited addresses...",I="CephX Key",P="Base64-encoded key...",$="This field is required.",y="The cluster name is not valid.",Q="This field is required.",Y="The CephX ID is not valid.",ee="The monitory address is not valid.",te="CephX key must be base64 encoded.",[[3,"modalRef"],[1,"modal-title"],t,[1,"modal-content"],["name","editPeerForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],i,[1,"form-group"],["for","clusterName",1,"col-form-label","required"],_,["type","text","placeholder",a,"id","clusterName","name","clusterName","formControlName","clusterName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","clientID",1,"col-form-label","required"],l,["type","text","placeholder",d,"id","clientID","name","clientID","formControlName","clientID",1,"form-control"],["for","monAddr",1,"col-form-label"],g,["type","text","placeholder",S,"id","monAddr","name","monAddr","formControlName","monAddr",1,"form-control"],["for","key",1,"col-form-label"],I,["type","text","placeholder",P,"id","key","name","key","formControlName","key",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],$,y,Q,Y,ee,te]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0)(1,"span",1),e.SDv(2,2),e.qZA(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p")(8,"span"),e.tHW(9,7),e._UZ(10,"kbd")(11,"kbd"),e.N_p(),e.qZA()(),e.TgZ(12,"div",8)(13,"label",9),e.SDv(14,10),e.qZA(),e._UZ(15,"input",11),e.YNc(16,Ki,2,0,"span",12),e.YNc(17,qi,2,0,"span",12),e.qZA(),e.TgZ(18,"div",8)(19,"label",13),e.SDv(20,14),e.qZA(),e._UZ(21,"input",15),e.YNc(22,Xi,2,0,"span",12),e.YNc(23,Qi,2,0,"span",12),e.qZA(),e.TgZ(24,"div",8)(25,"label",16)(26,"span"),e.SDv(27,17),e.qZA()(),e._UZ(28,"input",18),e.YNc(29,zi,2,0,"span",12),e.qZA(),e.TgZ(30,"div",8)(31,"label",19)(32,"span"),e.SDv(33,20),e.qZA()(),e._UZ(34,"input",21),e.YNc(35,Ji,2,0,"span",12),e.qZA()(),e.TgZ(36,"div",22)(37,"cd-form-button-panel",23),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const 
i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(2),e.pQV(o.mode),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.editPeerForm),e.xp6(7),e.pQV(o.mode)(o.poolName),e.QtT(9),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"invalidClusterName")),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"invalidClientID")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("monAddr",i,"invalidMonAddr")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("key",i,"invalidKey")),e.xp6(2),e.Q6J("form",o.editPeerForm)("submitText",o.actionLabels.SUBMIT)}},dependencies:[c.O5,f.z,A.p,Oe.U,M.o,B.b,K.P,J.V,r._Y,r.Fj,r.JJ,r.JL,r.sg,r.u]}),n})();const Vi=["healthTmpl"],Ui=["localTmpl"],ji=["remoteTmpl"];function Wi(n,s){if(1&n&&(e.TgZ(0,"span",6),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.health_color)),e.xp6(2),e.Oqu(o)}}function es(n,s){1&n&&(e.TgZ(0,"span",7),e.SDv(1,8),e.qZA())}function ts(n,s){1&n&&(e.TgZ(0,"span",9),e.SDv(1,10),e.qZA())}let ns=(()=>{class n{constructor(t,o,i,_,a){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=_,this.router=a,this.selection=new Ee.r,this.tableStatus=new se.E,this.data=[],this.permission=this.authStorageService.getPermissions().rbdMirroring;const l={permission:"update",icon:T.P.edit,click:()=>this.editModeModal(),name:"Edit Mode",canBePrimary:()=>!0},d={permission:"create",icon:T.P.add,name:"Add Peer",click:()=>this.editPeersModal("add"),disable:()=>!this.selection.first()||"disabled"===this.selection.first().mirror_mode,visible:()=>!this.getPeerUUID(),canBePrimary:()=>!1},g={permission:"update",icon:T.P.exchange,name:"Edit Peer",click:()=>this.editPeersModal("edit"),visible:()=>!!this.getPeerUUID()},S={permission:"delete",icon:T.P.destroy,name:"Delete Peer",click:()=>this.deletePeersModal(),visible:()=>!!this.getPeerUUID()};this.tableActions=[l,d,g,S]}ngOnInit(){this.columns=[{prop:"name",name:"Name",flexGrow:2},{prop:"mirror_mode",name:"Mode",flexGrow:2},{prop:"leader_id",name:"Leader",flexGrow:2},{prop:"image_local_count",name:"# Local",headerTemplate:this.localTmpl,flexGrow:2},{prop:"image_remote_count",name:"# Remote",headerTemplate:this.remoteTmpl,flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.pools,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}editModeModal(){this.router.navigate(["/block/mirroring",{outlets:{modal:[L.MQ.EDIT,this.selection.first().name]}}])}editPeersModal(t){const o={poolName:this.selection.first().name,mode:t};"edit"===t&&(o.peerUUID=this.getPeerUUID()),this.modalRef=this.modalService.show(Yi,o)}deletePeersModal(){const t=this.selection.first().name,o=this.getPeerUUID();this.modalRef=this.modalService.show(ue.M,{itemDescription:"mirror peer",itemNames:[`${t} (${o})`],submitActionObservable:()=>new At.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/mirroring/peer/delete",{pool_name:t}),call:this.rbdMirroringService.deletePeer(t,o)}).subscribe({error:_=>i.error(_),complete:()=>{this.rbdMirroringService.refresh(),i.complete()}})})})}getPeerUUID(){const t=this.selection.first(),o=this.data.find(i=>t&&t.name===i.name);if(o&&o.peer_uuids)return o.peer_uuids[0]}updateSelection(t){this.selection=t}}return 
n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(pe.Z),e.Y36(u.P),e.Y36(m.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-pools"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Vi,7),e.Gf(Ui,7),e.Gf(ji,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first),e.iGM(i=e.CRH())&&(o.localTmpl=i.first),e.iGM(i=e.CRH())&&(o.remoteTmpl=i.first)}},decls:9,vars:7,consts:function(){let s,t,o,i;return s="Local image count",t="# Local",o="Remote image count",i="# Remote",[["columnMode","flex","identifier","name","forceIdentifier","true","selectionType","single",3,"data","columns","autoReload","status","fetchData","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["healthTmpl",""],["localTmpl",""],["remoteTmpl",""],["name","modal"],[3,"ngClass"],["ngbTooltip",s],t,["ngbTooltip",o],i]},template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,Wi,3,4,"ng-template",null,2,e.W1O),e.YNc(4,es,2,0,"ng-template",null,3,e.W1O),e.YNc(6,ts,2,0,"ng-template",null,4,e.W1O),e._UZ(8,"router-outlet",5)),2&t&&(e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},dependencies:[c.mk,W.a,Me.K,m.lC,F._L,at]}),n})();function is(n,s){if(1&n&&e._UZ(0,"i",19),2&n){const t=e.oxw();e.Q6J("ngClass",t.icons.edit)}}function ss(n,s){if(1&n&&e._UZ(0,"i",19),2&n){const t=e.oxw();e.Q6J("ngClass",t.icons.check)}}let _s=(()=>{class n{constructor(t,o,i,_){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=_,this.selection=new Ee.r,this.peersExist=!0,this.subs=new _i.w,this.editing=!1,this.icons=T.P,this.permission=this.authStorageService.getPermissions().rbdMirroring;const a={permission:"update",icon:T.P.upload,click:()=>this.createBootstrapModal(),name:"Create Bootstrap Token",canBePrimary:()=>!0,disable:()=>!1},l={permission:"update",icon:T.P.download,click:()=>this.importBootstrapModal(),name:"Import Bootstrap Token",disable:()=>!1};this.tableActions=[a,l]}ngOnInit(){this.createForm(),this.subs.add(this.rbdMirroringService.startPolling()),this.subs.add(this.rbdMirroringService.subscribeSummary(t=>{this.status=t.content_data.status,this.peersExist=!!t.content_data.pools.find(o=>o.peer_uuids.length>0)})),this.rbdMirroringService.getSiteName().subscribe(t=>{this.siteName=t.site_name,this.rbdmirroringForm.get("siteName").setValue(this.siteName)})}createForm(){this.rbdmirroringForm=new Z.d({siteName:new r.p4({value:"",disabled:!0})})}ngOnDestroy(){this.subs.unsubscribe()}updateSiteName(){this.editing&&this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/mirroring/site_name/edit",{}),call:this.rbdMirroringService.setSiteName(this.rbdmirroringForm.getValue("siteName"))}).subscribe({complete:()=>{this.rbdMirroringService.refresh()}}),this.editing=!this.editing}createBootstrapModal(){this.modalRef=this.modalService.show(gi,{siteName:this.siteName})}importBootstrapModal(){this.modalRef=this.modalService.show(Mi,{siteName:this.siteName})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(pe.Z),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring"]],decls:31,vars:10,consts:function(){let s,t,o,i;return s="Site 
Name",t="Daemons",o="Pools",i="Images",[["name","rbdmirroringForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"row","mb-3"],[1,"col-md-auto"],["for","siteName",1,"col-form-label"],s,[1,"col-sm-4","d-flex"],["type","text","id","siteName","name","siteName","formControlName","siteName",1,"form-control"],["id","editSiteName",1,"btn","btn-light",3,"click"],[3,"ngClass",4,"ngIf"],[3,"source","byId"],[1,"col"],[1,"table-actions","float-end",3,"permission","selection","tableActions"],[1,"row"],[1,"col-sm-6"],t,o,[1,"col-md-12"],i,[3,"ngClass"]]},template:function(t,o){1&t&&(e.TgZ(0,"form",0,1)(2,"div",2)(3,"div",3)(4,"label",4),e.SDv(5,5),e.qZA()(),e.TgZ(6,"div",6),e._UZ(7,"input",7),e.TgZ(8,"button",8),e.NdJ("click",function(){return o.updateSiteName()}),e.YNc(9,is,1,1,"i",9),e.YNc(10,ss,1,1,"i",9),e.qZA(),e._UZ(11,"cd-copy-2-clipboard-button",10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"cd-table-actions",12),e.qZA()()(),e.TgZ(14,"div",13)(15,"div",14)(16,"legend"),e.SDv(17,15),e.qZA(),e.TgZ(18,"div"),e._UZ(19,"cd-mirroring-daemons"),e.qZA()(),e.TgZ(20,"div",14)(21,"legend"),e.SDv(22,16),e.qZA(),e.TgZ(23,"div"),e._UZ(24,"cd-mirroring-pools"),e.qZA()()(),e.TgZ(25,"div",13)(26,"div",17)(27,"legend"),e.SDv(28,18),e.qZA(),e.TgZ(29,"div"),e._UZ(30,"cd-mirroring-images"),e.qZA()()()),2&t&&(e.Q6J("formGroup",o.rbdmirroringForm),e.xp6(7),e.uIk("disabled",!o.editing||null),e.xp6(1),e.uIk("title",o.editing?"Save":"Edit"),e.xp6(1),e.Q6J("ngIf",!o.editing),e.xp6(1),e.Q6J("ngIf",o.editing),e.xp6(1),e.Q6J("source",o.siteName)("byId",!1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},dependencies:[c.mk,c.O5,Ye.s,Me.K,M.o,B.b,J.V,r._Y,r.Fj,r.JJ,r.JL,r.sg,r.u,Pi,Hi,ns]}),n})();class as{}function rs(n,s){if(1&n&&(e.TgZ(0,"option",16),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.id),e.xp6(1),e.Oqu(t.name)}}function ls(n,s){1&n&&(e.TgZ(0,"span",17),e.SDv(1,18),e.qZA())}let cs=(()=>{class n{constructor(t,o,i,_,a,l){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.route=a,this.location=l,this.bsConfig={containerClass:"theme-default"},this.peerExists=!1,this.mirrorModes=[{id:"disabled",name:"Disabled"},{id:"pool",name:"Pool"},{id:"image",name:"Image"}],this.createForm()}createForm(){this.editModeForm=new Z.d({mirrorMode:new r.p4("",{validators:[r.kI.required,this.validateMode.bind(this)]})})}ngOnInit(){this.route.params.subscribe(t=>{this.poolName=t.pool_name}),this.pattern=`${this.poolName}`,this.rbdMirroringService.getPool(this.poolName).subscribe(t=>{this.setResponse(t)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.peerExists=!1;const i=t.content_data.pools.find(_=>this.poolName===_.name);this.peerExists=i&&i.peer_uuids.length})}ngOnDestroy(){this.subs.unsubscribe()}validateMode(t){return"disabled"===t.value&&this.peerExists?{cannotDisable:{value:t.value}}:null}setResponse(t){this.editModeForm.get("mirrorMode").setValue(t.mirror_mode)}update(){const t=new as;t.mirror_mode=this.editModeForm.getValue("mirrorMode"),this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/mirroring/pool/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePool(this.poolName,t)}).subscribe({error:()=>this.editModeForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.location.back()}})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(F.Kz),e.Y36(L.p4),e.Y36(q),e.Y36(u.P),e.Y36(m.gz),e.Y36(c.Ye))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-mode-modal"]],decls:21,vars:7,consts:function(){let s,t,o,i;return s="Edit pool mirror mode",t="To edit the mirror mode for pool\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ", select a new mode from the list and click\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Update" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",t=e.Zx4(t),o="Mode",i="Peer clusters must be removed prior to disabling mirror.",[["pageURL","mirroring",3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","editModeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","mirrorMode",1,"col-form-label"],o,["id","mirrorMode","name","mirrorMode","formControlName","mirrorMode",1,"form-select"],[3,"value",4,"ngFor","ngForOf"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[3,"value"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd")(11,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(12,"div",8)(13,"label",9)(14,"span"),e.SDv(15,10),e.qZA()(),e.TgZ(16,"select",11),e.YNc(17,rs,2,2,"option",12),e.qZA(),e.YNc(18,ls,2,0,"span",13),e.qZA()(),e.TgZ(19,"div",14)(20,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.editModeForm),e.xp6(7),e.pQV(o.poolName),e.QtT(9),e.xp6(6),e.Q6J("ngForOf",o.mirrorModes),e.xp6(1),e.Q6J("ngIf",o.editModeForm.showError("mirrorMode",i,"cannotDisable")),e.xp6(2),e.Q6J("form",o.editModeForm)("submitText",o.actionLabels.UPDATE)}},dependencies:[c.sg,c.O5,f.z,A.p,M.o,K.P,J.V,r._Y,r.YN,r.Kr,r.EJ,r.JJ,r.JL,r.sg,r.u]}),n})();var Pt=p(7357),ds=p(28049),ps=p(43190),Ve=p(80842),rt=p(30633),Ue=p(47557),us=p(28211);class ms{}var Ie=(()=>{return(n=Ie||(Ie={}))[n.V1=1]="V1",n[n.V2=2]="V2",Ie;var n})();class gs{constructor(){this.features=[]}}class Ts{constructor(){this.features=[]}}class fs{}class Cs extends fs{constructor(){super(...arguments),this.features=[]}}class lt{constructor(){this.features=[],this.remove_scheduling=!1}}var je=(()=>{return(n=je||(je={})).editing="editing",n.cloning="cloning",n.copying="copying",je;var n})(),bt=p(18372),Ss=p(17932),Rs=p(60950);function Es(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",58),e.SDv(2,59),e.ALo(3,"titlecase"),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",60)(6,"hr"),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(3),e.pQV(e.lcZ(3,1,t.action)),e.QtT(2)}}function Ms(n,s){1&n&&(e.TgZ(0,"span",61),e.ynx(1),e.SDv(2,62),e.BQk(),e.qZA())}function Os(n,s){1&n&&(e.TgZ(0,"span",61),e.ynx(1),e.SDv(2,63),e.BQk(),e.qZA())}function hs(n,s){1&n&&e._UZ(0,"input",64)}function As(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,67),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ps(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,68),e.qZA()),2&n&&e.Q6J("ngValue",null)}function bs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,69),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Is(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function Ns(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"select",65),e.NdJ("change",function(){e.CHM(t);const i=e.oxw(2);return 
e.KtG(i.setPoolMirrorMode())}),e.YNc(1,As,2,1,"option",66),e.YNc(2,Ps,2,1,"option",66),e.YNc(3,bs,2,1,"option",66),e.YNc(4,Is,2,2,"option",46),e.qZA()}if(2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function Fs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,71),e.qZA())}const Ds=function(n,s){return[n,s]};function Ls(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"div",20),e._UZ(2,"i",72),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(2),e.Q6J("ngClass",e.WLB(1,Ds,t.icons.spinner,t.icons.spin))}}function vs(n,s){1&n&&e._UZ(0,"input",76)}function $s(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,78),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Bs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,79),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Gs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,80),e.qZA()),2&n&&e.Q6J("ngValue",null)}function ys(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function xs(n,s){if(1&n&&(e.TgZ(0,"select",77),e.YNc(1,$s,2,1,"option",66),e.YNc(2,Bs,2,1,"option",66),e.YNc(3,Gs,2,1,"option",66),e.YNc(4,ys,2,2,"option",46),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.namespaces)}}function Zs(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",73),e._uU(2," Namespace "),e.qZA(),e.TgZ(3,"div",12),e.YNc(4,vs,1,0,"input",74),e.YNc(5,xs,5,4,"select",75),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngIf","editing"===t.mode||!t.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==t.mode&&t.poolPermission.read)}}function ws(n,s){1&n&&(e.TgZ(0,"cd-helper")(1,"span"),e.SDv(2,81),e.qZA()())}function Hs(n,s){1&n&&e._UZ(0,"input",87)}function ks(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,89),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ks(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,90),e.qZA()),2&n&&e.Q6J("ngValue",null)}function qs(n,s){1&n&&(e.TgZ(0,"option",50),e._uU(1,"-- Select a data pool -- "),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Xs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function Qs(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"select",88),e.NdJ("change",function(i){e.CHM(t);const _=e.oxw(3);return e.KtG(_.onDataPoolChange(i.target.value))}),e.YNc(1,ks,2,1,"option",66),e.YNc(2,Ks,2,1,"option",66),e.YNc(3,qs,2,1,"option",66),e.YNc(4,Xs,2,2,"option",46),e.qZA()}if(2&n){const t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.dataPools),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&0===t.dataPools.length),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&t.dataPools.length>0),e.xp6(1),e.Q6J("ngForOf",t.dataPools)}}function zs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,91),e.qZA())}const We=function(n){return{required:n}};function Js(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",82)(2,"span",72),e.SDv(3,83),e.qZA(),e._UZ(4,"cd-helper",84),e.qZA(),e.TgZ(5,"div",12),e.YNc(6,Hs,1,0,"input",85),e.YNc(7,Qs,5,4,"select",86),e.YNc(8,zs,2,0,"span",14),e.qZA()()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(2),e.Q6J("ngClass",e.VKq(4,We,"editing"!==o.mode)),e.xp6(4),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("dataPool",t,"required"))}}function 
Ys(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,92),e.qZA())}function Vs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,93),e.qZA())}function Us(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,94),e.qZA())}function js(n,s){if(1&n&&e._UZ(0,"cd-helper",98),2&n){const t=e.oxw().$implicit;e.s9C("html",t.helperHtml)}}function Ws(n,s){if(1&n&&(e.TgZ(0,"div",21),e._UZ(1,"input",95),e.TgZ(2,"label",96),e._uU(3),e.qZA(),e.YNc(4,js,1,1,"cd-helper",97),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.key),e.s9C("name",t.key),e.s9C("formControlName",t.key),e.xp6(1),e.s9C("for",t.key),e.xp6(1),e.Oqu(t.desc),e.xp6(1),e.Q6J("ngIf",t.helperHtml)}}const It=function(n){return["edit",n]},Nt=function(n){return{modal:n}},Ft=function(n){return{outlets:n}},Dt=function(n){return["/block/mirroring",n]};function e_(n,s){if(1&n&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,99),e._UZ(3,"b")(4,"a",100),e.N_p(),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("routerLink",e.VKq(7,Dt,e.VKq(5,Ft,e.VKq(3,Nt,e.VKq(1,It,t.currentPoolName)))))}}function t_(n,s){if(1&n&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,105),e._UZ(3,"b")(4,"a",100),e.N_p(),e.qZA()()),2&n){const t=e.oxw(4);e.xp6(4),e.Q6J("routerLink",e.VKq(7,Dt,e.VKq(5,Ft,e.VKq(3,Nt,e.VKq(1,It,t.currentPoolName)))))}}function o_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",102)(1,"input",103),e.NdJ("change",function(){e.CHM(t);const i=e.oxw(3);return e.KtG(i.setExclusiveLock())}),e.qZA(),e.TgZ(2,"label",104),e._uU(3),e.ALo(4,"titlecase"),e.qZA(),e.YNc(5,t_,5,9,"cd-helper",25),e.qZA()}if(2&n){const t=s.$implicit,o=e.oxw(3);e.xp6(1),e.Q6J("id",t)("value",t),e.uIk("disabled","pool"===o.poolMirrorMode&&"snapshot"===t||null),e.xp6(1),e.Q6J("for",t),e.xp6(1),e.Oqu(e.lcZ(4,6,t)),e.xp6(2),e.Q6J("ngIf","pool"===o.poolMirrorMode&&"snapshot"===t)}}function n_(n,s){if(1&n&&(e.TgZ(0,"div"),e.YNc(1,o_,6,8,"div",101),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngForOf",t.mirroringOptions)}}function i_(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",106),e.tHW(2,107),e._UZ(3,"cd-helper",108),e.N_p(),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",109),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(5),e.uIk("disabled",!1===t.peerConfigured||null)}}function s_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"a",110),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).advancedEnabled=!0,e.KtG(!1)}),e.SDv(1,111),e.qZA()}}function __(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function a_(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function r_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,112),e.qZA())}function l_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,113),e.qZA())}function c_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,114),e.qZA())}function d_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,115),e.qZA())}function p_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,Es,7,3,"div",8),e.TgZ(10,"div",9)(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,Ms,3,0,"span",14),e.YNc(16,Os,3,0,"span",14),e.qZA()(),e.TgZ(17,"div",15),e.NdJ("change",function(i){e.CHM(t);const _=e.oxw();return 
e.KtG(_.onPoolChange(i.target.value))}),e.TgZ(18,"label",16),e.SDv(19,17),e.qZA(),e.TgZ(20,"div",12),e.YNc(21,hs,1,0,"input",18),e.YNc(22,Ns,5,4,"select",19),e.YNc(23,Fs,2,0,"span",14),e.qZA()(),e.YNc(24,Ls,3,4,"div",8),e.YNc(25,Zs,6,2,"div",8),e.TgZ(26,"div",9)(27,"div",20)(28,"div",21)(29,"input",22),e.NdJ("change",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.onUseDataPoolChange())}),e.qZA(),e.TgZ(30,"label",23),e.SDv(31,24),e.qZA(),e.YNc(32,ws,3,0,"cd-helper",25),e.qZA()()(),e.YNc(33,Js,9,6,"div",8),e.TgZ(34,"div",9)(35,"label",26),e.SDv(36,27),e.qZA(),e.TgZ(37,"div",12),e._UZ(38,"input",28),e.YNc(39,Ys,2,0,"span",14),e.YNc(40,Vs,2,0,"span",14),e.YNc(41,Us,2,0,"span",14),e.qZA()(),e.TgZ(42,"div",29)(43,"label",30),e.SDv(44,31),e.qZA(),e.TgZ(45,"div",12),e.YNc(46,Ws,5,6,"div",32),e.qZA()(),e.TgZ(47,"div",9)(48,"div",20)(49,"div",21)(50,"input",33),e.NdJ("change",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.setMirrorMode())}),e.qZA(),e.TgZ(51,"label",34),e._uU(52,"Mirroring"),e.qZA(),e.YNc(53,e_,5,9,"cd-helper",25),e.qZA(),e.YNc(54,n_,2,1,"div",25),e.qZA()(),e.YNc(55,i_,6,1,"div",8),e.TgZ(56,"div",35)(57,"div",36),e.YNc(58,s_,2,0,"a",37),e.qZA()(),e.TgZ(59,"div",38)(60,"legend",39),e.SDv(61,40),e.qZA(),e.TgZ(62,"div",41)(63,"h4",39),e.SDv(64,42),e.qZA(),e.TgZ(65,"div",9)(66,"label",43),e.tHW(67,44),e._UZ(68,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(69,"div",12)(70,"select",45),e.YNc(71,__,2,2,"option",46),e.qZA()()(),e.TgZ(72,"div",9)(73,"label",47),e.tHW(74,48),e._UZ(75,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(76,"div",12)(77,"select",49)(78,"option",50),e.SDv(79,51),e.qZA(),e.YNc(80,a_,2,2,"option",46),e.qZA(),e.YNc(81,r_,2,0,"span",14),e.YNc(82,l_,2,0,"span",14),e.qZA()(),e.TgZ(83,"div",9)(84,"label",52),e.tHW(85,53),e._UZ(86,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(87,"div",12),e._UZ(88,"input",54),e.YNc(89,c_,2,0,"span",14),e.YNc(90,d_,2,0,"span",14),e.qZA()()(),e.TgZ(91,"cd-rbd-configuration-form",55),e.NdJ("changes",function(i){e.CHM(t);const _=e.oxw();return e.KtG(_.getDirtyConfigurationValues=i)}),e.qZA()()(),e.TgZ(92,"div",56)(93,"cd-form-button-panel",57),e.NdJ("submitActionEvent",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.submit())}),e.ALo(94,"titlecase"),e.ALo(95,"upperFirst"),e.qZA()()()()()}if(2&n){const 
t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.rbdForm),e.xp6(6),e.pQV(e.lcZ(6,36,o.action))(e.lcZ(7,38,o.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",o.rbdForm.getValue("parent")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("name",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("name",t,"pattern")),e.xp6(2),e.Q6J("ngClass",e.VKq(44,We,"editing"!==o.mode)),e.xp6(3),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("pool",t,"required")),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.rbdForm.getValue("pool")&&null===o.namespaces),e.xp6(1),e.Q6J("ngIf","editing"===o.mode&&o.rbdForm.getValue("namespace")||"editing"!==o.mode&&(o.namespaces&&o.namespaces.length>0||!o.poolPermission.read)),e.xp6(7),e.Q6J("ngIf",o.allDataPools.length<=1),e.xp6(1),e.Q6J("ngIf",o.rbdForm.getValue("useDataPool")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("size",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("size",t,"invalidSizeObject")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("size",t,"pattern")),e.xp6(5),e.Q6J("ngForOf",o.featuresList),e.xp6(7),e.Q6J("ngIf",!1===o.mirroring&&o.currentPoolName),e.xp6(1),e.Q6J("ngIf",o.mirroring),e.xp6(1),e.Q6J("ngIf","snapshot"===o.rbdForm.getValue("mirroringMode")&&o.mirroring),e.xp6(3),e.Q6J("ngIf",!o.advancedEnabled),e.xp6(1),e.Q6J("hidden",!o.advancedEnabled),e.xp6(12),e.Q6J("ngForOf",o.objectSizes),e.xp6(2),e.Q6J("ngClass",e.VKq(46,We,o.rbdForm.getValue("stripingCount"))),e.xp6(5),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.objectSizes),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"invalidStripingUnit")),e.xp6(2),e.Q6J("ngClass",e.VKq(48,We,o.rbdForm.getValue("stripingUnit"))),e.xp6(5),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"min")),e.xp6(1),e.Q6J("form",o.rbdForm)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",t)("submitText",e.lcZ(94,40,o.action)+" "+e.lcZ(95,42,o.resource))}}let $e=(()=>{class n extends k.E{constructor(t,o,i,_,a,l,d,g,S,I){super(),this.authStorageService=t,this.route=o,this.poolService=i,this.rbdService=_,this.formatter=a,this.taskWrapper=l,this.dimlessBinaryPipe=d,this.actionLabels=g,this.router=S,this.rbdMirroringService=I,this.namespaces=[],this.namespacesByPoolCache={},this.pools=null,this.allPools=null,this.dataPools=null,this.allDataPools=[],this.featuresList=[],this.initializeConfigData=new Pt.t(1),this.peerConfigured=!1,this.advancedEnabled=!1,this.rbdFormMode=je,this.defaultObjectSize="4 MiB",this.mirroringOptions=["journal","snapshot"],this.mirroring=!1,this.currentPoolName="",this.objectSizes=["4 KiB","8 KiB","16 KiB","32 KiB","64 KiB","128 KiB","256 KiB","512 KiB","1 MiB","2 MiB","4 MiB","8 MiB","16 MiB","32 MiB"],this.defaultStripingUnit="4 MiB",this.defaultStripingCount=1,this.rbdImage=new Pt.t(1),this.icons=T.P,this.routerUrl=this.router.url,this.poolPermission=this.authStorageService.getPermissions().pool,this.resource="RBD",this.features={"deep-flatten":{desc:"Deep flatten",requires:null,allowEnable:!1,allowDisable:!0,helperHtml:"Feature can be disabled but can't be re-enabled later"},layering:{desc:"Layering",requires:null,allowEnable:!1,allowDisable:!1,helperHtml:"Feature flag can't be manipulated after the image is created. 
Disabling this option will also disable the Protect and Clone actions on Snapshot"},"exclusive-lock":{desc:"Exclusive lock",requires:null,allowEnable:!0,allowDisable:!0},"object-map":{desc:"Object map (requires exclusive-lock)",requires:"exclusive-lock",allowEnable:!0,allowDisable:!0,initDisabled:!0},"fast-diff":{desc:"Fast diff (interlocked with object-map)",requires:"object-map",allowEnable:!0,allowDisable:!0,interlockedWith:"object-map",initDisabled:!0}},this.featuresList=this.objToArray(this.features),this.createForm()}objToArray(t){return C().map(t,(o,i)=>Object.assign(o,{key:i}))}createForm(){this.rbdForm=new Z.d({parent:new r.p4(""),name:new r.p4("",{validators:[r.kI.required,r.kI.pattern(/^[^@/]+?$/)]}),pool:new r.p4(null,{validators:[r.kI.required]}),namespace:new r.p4(null),useDataPool:new r.p4(!1),dataPool:new r.p4(null),size:new r.p4(null,{updateOn:"blur"}),obj_size:new r.p4(this.defaultObjectSize),features:new Z.d(this.featuresList.reduce((t,o)=>(t[o.key]=new r.p4({value:!1,disabled:!!o.initDisabled}),t),{})),mirroring:new r.p4(""),schedule:new r.p4("",{validators:[r.kI.pattern(/^([0-9]+)d|([0-9]+)h|([0-9]+)m$/)]}),mirroringMode:new r.p4(""),stripingUnit:new r.p4(this.defaultStripingUnit),stripingCount:new r.p4(this.defaultStripingCount,{updateOn:"blur"})},this.validateRbdForm(this.formatter))}disableForEdit(){this.rbdForm.get("parent").disable(),this.rbdForm.get("pool").disable(),this.rbdForm.get("namespace").disable(),this.rbdForm.get("useDataPool").disable(),this.rbdForm.get("dataPool").disable(),this.rbdForm.get("obj_size").disable(),this.rbdForm.get("stripingUnit").disable(),this.rbdForm.get("stripingCount").disable(),this.rbdImage.subscribe(t=>{t.image_format===Ie.V1?(this.rbdForm.get("deep-flatten").disable(),this.rbdForm.get("layering").disable(),this.rbdForm.get("exclusive-lock").disable()):(this.rbdForm.get("deep-flatten").value||this.rbdForm.get("deep-flatten").disable(),this.rbdForm.get("layering").disable())})}disableForClone(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}disableForCopy(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}ngOnInit(){this.prepareFormForAction(),this.gatherNeededData().subscribe(this.handleExternalData.bind(this))}setExclusiveLock(){this.mirroring&&"journal"===this.rbdForm.get("mirroringMode").value?(this.rbdForm.get("exclusive-lock").setValue(!0),this.rbdForm.get("exclusive-lock").disable()):(this.rbdForm.get("exclusive-lock").enable(),"pool"===this.poolMirrorMode&&this.rbdForm.get("mirroringMode").setValue(this.mirroringOptions[0]))}setMirrorMode(){this.mirroring=!this.mirroring,this.setExclusiveLock(),this.checkPeersConfigured()}checkPeersConfigured(t){var o=t||this.rbdForm.get("pool").value;this.rbdMirroringService.getPeerForPool(o).subscribe(i=>{i.length>0&&(this.peerConfigured=!0)})}setPoolMirrorMode(){this.currentPoolName=this.mode===this.rbdFormMode.editing?this.response?.pool_name:this.rbdForm.getValue("pool"),this.currentPoolName&&(this.rbdMirroringService.refresh(),this.rbdMirroringService.subscribeSummary(t=>{const o=t.content_data.pools.find(i=>i.name===this.currentPoolName);this.poolMirrorMode=o.mirror_mode,"disabled"===o.mirror_mode&&(this.mirroring=!1,this.rbdForm.get("mirroring").setValue(this.mirroring),this.rbdForm.get("mirroring").disable())})),this.setExclusiveLock()}prepareFormForAction(){const 
t=this.routerUrl;t.startsWith("/block/rbd/edit")?(this.mode=this.rbdFormMode.editing,this.action=this.actionLabels.EDIT,this.disableForEdit()):t.startsWith("/block/rbd/clone")?(this.mode=this.rbdFormMode.cloning,this.disableForClone(),this.action=this.actionLabels.CLONE):t.startsWith("/block/rbd/copy")?(this.mode=this.rbdFormMode.copying,this.action=this.actionLabels.COPY,this.disableForCopy()):this.action=this.actionLabels.CREATE,C().each(this.features,o=>{this.rbdForm.get("features").get(o.key).valueChanges.subscribe(i=>this.featureFormUpdate(o.key,i))})}gatherNeededData(){const t={};return this.mode?this.route.params.subscribe(o=>{const i=v.N.fromString(decodeURIComponent(o.image_spec));o.snap&&(this.snapName=decodeURIComponent(o.snap)),t.rbd=this.rbdService.get(i),this.checkPeersConfigured(i.poolName)}):t.defaultFeatures=this.rbdService.defaultFeatures(),this.mode!==this.rbdFormMode.editing&&this.poolPermission.read&&(t.pools=this.poolService.list(["pool_name","type","flags_names","application_metadata"])),(0,ce.D)(t)}handleExternalData(t){if(this.handlePoolData(t.pools),this.setPoolMirrorMode(),t.defaultFeatures&&this.setFeatures(t.defaultFeatures),t.rbd){const o=t.rbd;this.setResponse(o,this.snapName),this.rbdImage.next(o)}this.loadingReady()}handlePoolData(t){if(!t)return;const o=[],i=[];for(const _ of t)this.rbdService.isRBDPool(_)&&("replicated"===_.type?(o.push(_),i.push(_)):"erasure"===_.type&&-1!==_.flags_names.indexOf("ec_overwrites")&&i.push(_));if(this.pools=o,this.allPools=o,this.dataPools=i,this.allDataPools=i,1===this.pools.length){const _=this.pools[0].pool_name;this.rbdForm.get("pool").setValue(_),this.onPoolChange(_)}this.allDataPools.length<=1&&this.rbdForm.get("useDataPool").disable()}onPoolChange(t){const o=this.rbdForm.get("dataPool");o.value===t&&o.setValue(null),this.dataPools=this.allDataPools?this.allDataPools.filter(i=>i.pool_name!==t):[],this.namespaces=null,t in this.namespacesByPoolCache?this.namespaces=this.namespacesByPoolCache[t]:this.rbdService.listNamespaces(t).subscribe(i=>{i=i.map(_=>_.namespace),this.namespacesByPoolCache[t]=i,this.namespaces=i}),this.rbdForm.get("namespace").setValue(null)}onUseDataPoolChange(){this.rbdForm.getValue("useDataPool")||(this.rbdForm.get("dataPool").setValue(null),this.onDataPoolChange(null))}onDataPoolChange(t){const o=this.allPools.filter(i=>i.pool_name!==t);this.rbdForm.getValue("pool")===t&&this.rbdForm.get("pool").setValue(null),this.pools=o}validateRbdForm(t){return o=>{const i=o.get("useDataPool"),_=o.get("dataPool");let a=null;i.value&&null==_.value&&(a={required:!0}),_.setErrors(a);const l=o.get("size"),d=o.get("obj_size"),g=t.toBytes(null!=d.value?d.value:this.defaultObjectSize),S=o.get("stripingCount"),I=null!=S.value?S.value:this.defaultStripingCount;let P=null;null===l.value?P={required:!0}:I*g>t.toBytes(l.value)&&(P={invalidSizeObject:!0}),l.setErrors(P);const $=o.get("stripingUnit");let y=null;null===$.value&&null!==S.value?y={required:!0}:null!==$.value&&t.toBytes($.value)>g&&(y={invalidStripingUnit:!0}),$.setErrors(y);let Q=null;return null===S.value&&null!==$.value?Q={required:!0}:I<1&&(Q={min:!0}),S.setErrors(Q),null}}deepBoxCheck(t,o){this.getDependentChildFeatures(t).forEach(_=>{const a=this.rbdForm.get(_.key);o?a.enable({emitEvent:!1}):(a.disable({emitEvent:!1}),a.setValue(!1,{emitEvent:!1}),this.deepBoxCheck(_.key,o));const 
l=this.rbdForm.get("features");this.mode===this.rbdFormMode.editing&&l.get(_.key).enabled&&(-1!==this.response.features_name.indexOf(_.key)&&!_.allowDisable||-1===this.response.features_name.indexOf(_.key)&&!_.allowEnable)&&l.get(_.key).disable()})}getDependentChildFeatures(t){return C().filter(this.features,o=>o.requires===t)||[]}interlockCheck(t,o){const i=this.featuresList.find(_=>_.key===t);if(this.response){const _=null!=i.interlockedWith,a=this.featuresList.find(d=>d.interlockedWith===i.key),l=!!this.response.features_name.find(d=>d===i.key);if(_){if(l!==!!this.response.features_name.find(g=>g===i.interlockedWith))return}else if(a&&!!this.response.features_name.find(g=>g===a.key)!==l)return}o?C().filter(this.features,_=>_.interlockedWith===t).forEach(_=>this.rbdForm.get(_.key).setValue(!0,{emitEvent:!1})):i.interlockedWith&&this.rbdForm.get("features").get(i.interlockedWith).setValue(!1)}featureFormUpdate(t,o){if(o){const i=this.features[t].requires;if(i&&!this.rbdForm.getValue(i))return void this.rbdForm.get(`features.${t}`).setValue(!1)}this.deepBoxCheck(t,o),this.interlockCheck(t,o)}setFeatures(t){const o=this.rbdForm.get("features");C().forIn(this.features,i=>{-1!==t.indexOf(i.key)&&o.get(i.key).setValue(!0),this.featureFormUpdate(i.key,o.get(i.key).value)})}setResponse(t,o){this.response=t;const i=new v.N(t.pool_name,t.namespace,t.name).toString();if(this.mode===this.rbdFormMode.cloning)this.rbdForm.get("parent").setValue(`${i}@${o}`);else if(this.mode===this.rbdFormMode.copying)o?this.rbdForm.get("parent").setValue(`${i}@${o}`):this.rbdForm.get("parent").setValue(`${i}`);else if(t.parent){const _=t.parent;this.rbdForm.get("parent").setValue(`${_.pool_name}/${_.image_name}@${_.snap_name}`)}this.mode===this.rbdFormMode.editing&&(this.rbdForm.get("name").setValue(t.name),"snapshot"===t?.mirror_mode||t.features_name.includes("journaling")?(this.mirroring=!0,this.rbdForm.get("mirroring").setValue(this.mirroring),this.rbdForm.get("mirroringMode").setValue(t?.mirror_mode),this.rbdForm.get("schedule").setValue(t?.schedule_interval)):(this.mirroring=!1,this.rbdForm.get("mirroring").setValue(this.mirroring)),this.setPoolMirrorMode()),this.rbdForm.get("pool").setValue(t.pool_name),this.onPoolChange(t.pool_name),this.rbdForm.get("namespace").setValue(t.namespace),t.data_pool&&(this.rbdForm.get("useDataPool").setValue(!0),this.rbdForm.get("dataPool").setValue(t.data_pool)),this.rbdForm.get("size").setValue(this.dimlessBinaryPipe.transform(t.size)),this.rbdForm.get("obj_size").setValue(this.dimlessBinaryPipe.transform(t.obj_size)),this.setFeatures(t.features_name),this.rbdForm.get("stripingUnit").setValue(this.dimlessBinaryPipe.transform(t.stripe_unit)),this.rbdForm.get("stripingCount").setValue(t.stripe_count),this.initializeConfigData.next({initialData:this.response.configuration,sourceType:rt.h.image})}createRequest(){const t=new Cs;return 
t.pool_name=this.rbdForm.getValue("pool"),t.namespace=this.rbdForm.getValue("namespace"),t.name=this.rbdForm.getValue("name"),t.schedule_interval=this.rbdForm.getValue("schedule"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),"image"===this.poolMirrorMode&&(t.mirror_mode=this.rbdForm.getValue("mirroringMode")),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(),t}addObjectSizeAndStripingToRequest(t){t.obj_size=this.formatter.toBytes(this.rbdForm.getValue("obj_size")),C().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),this.mirroring&&"journal"===this.rbdForm.getValue("mirroringMode")&&t.features.push("journaling"),t.stripe_unit=this.formatter.toBytes(this.rbdForm.getValue("stripingUnit")),t.stripe_count=this.rbdForm.getValue("stripingCount"),t.data_pool=this.rbdForm.getValue("dataPool")}createAction(){const t=this.createRequest();return this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/create",{pool_name:t.pool_name,namespace:t.namespace,image_name:t.name,schedule_interval:t.schedule_interval,start_time:t.start_time}),call:this.rbdService.create(t)})}editRequest(){const t=new lt;if(t.name=this.rbdForm.getValue("name"),t.schedule_interval=this.rbdForm.getValue("schedule"),t.name=this.rbdForm.getValue("name"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),C().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),t.enable_mirror=this.rbdForm.getValue("mirroring"),t.enable_mirror)"journal"===this.rbdForm.getValue("mirroringMode")&&t.features.push("journaling"),"image"===this.poolMirrorMode&&(t.mirror_mode=this.rbdForm.getValue("mirroringMode"));else{const o=t.features.indexOf("journaling",0);o>-1&&t.features.splice(o,1)}return t.configuration=this.getDirtyConfigurationValues(),t}cloneRequest(){const t=new gs;return t.child_pool_name=this.rbdForm.getValue("pool"),t.child_namespace=this.rbdForm.getValue("namespace"),t.child_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,rt.h.image),t}editAction(){const t=new v.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/edit",{image_spec:t.toString()}),call:this.rbdService.update(t,this.editRequest())})}cloneAction(){const t=this.cloneRequest(),o=new v.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/clone",{parent_image_spec:o.toString(),parent_snap_name:this.snapName,child_pool_name:t.child_pool_name,child_namespace:t.child_namespace,child_image_name:t.child_image_name}),call:this.rbdService.cloneSnapshot(o,this.snapName,t)})}copyRequest(){const t=new Ts;return this.snapName&&(t.snapshot_name=this.snapName),t.dest_pool_name=this.rbdForm.getValue("pool"),t.dest_namespace=this.rbdForm.getValue("namespace"),t.dest_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,rt.h.image),t}copyAction(){const t=this.copyRequest(),o=new v.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new 
E.R("rbd/copy",{src_image_spec:o.toString(),dest_pool_name:t.dest_pool_name,dest_namespace:t.dest_namespace,dest_image_name:t.dest_image_name}),call:this.rbdService.copy(o,t)})}submit(){this.mode||this.rbdImage.next("create"),this.rbdImage.pipe((0,ds.P)(),(0,ps.w)(()=>this.mode===this.rbdFormMode.editing?this.editAction():this.mode===this.rbdFormMode.cloning?this.cloneAction():this.mode===this.rbdFormMode.copying?this.copyAction():this.createAction())).subscribe(()=>{},()=>this.rbdForm.setErrors({cdSubmitButton:!0}),()=>this.router.navigate(["/block/rbd"]))}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(m.gz),e.Y36(Ve.q),e.Y36(H),e.Y36(us.H),e.Y36(u.P),e.Y36(Ue.$),e.Y36(L.p4),e.Y36(m.F0),e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let s,t,o,i,_,a,l,d,g,S,I,P,$,y,Q,Y,ee,te,w,_e,ae,O,me,ge,Te,fe,Ce,Se,Re,G,Ge,ye,xe,Ze,we,He,ke,Ke,qe,Xe,Qe,ze;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="Pool",i="Use a dedicated data pool",_="Size",a="e.g., 10GiB",l="Features",d="Advanced",g="Striping",S="Object size" + "\ufffd#68\ufffd" + "Objects in the Ceph Storage Cluster have a maximum configurable size (e.g., 2MB, 4MB, etc.). The object size should be large enough to accommodate many stripe units, and should be a multiple of the stripe unit." + "\ufffd/#68\ufffd" + "",I="Stripe unit" + "\ufffd#75\ufffd" + "Stripes have a configurable unit size (e.g., 64kb). The Ceph Client divides the data it will write to objects into equally sized stripe units, except for the last stripe unit. A stripe width, should be a fraction of the Object Size so that an object may contain many stripe units." + "\ufffd/#75\ufffd" + "",P="-- Select stripe unit --",$="Stripe count" + "\ufffd#86\ufffd" + "The Ceph Client writes a sequence of stripe units over a series of objects determined by the stripe count. The series of objects is called an object set. After the Ceph Client writes to the last object in the object set, it returns to the first object in the object set." + "\ufffd/#86\ufffd" + "",y="" + "\ufffd0\ufffd" + " from",Q="This field is required.",Y="'/' and '@' are not allowed.",ee="Loading...",te="-- No rbd pools available --",w="-- Select a pool --",_e="This field is required.",ae="Loading...",O="-- No namespaces available --",me="-- Select a namespace --",ge="You need more than one pool with the rbd application label use to use a dedicated data pool.",Te="Data pool",fe="Dedicated pool that stores the object-data of the RBD.",Ce="Loading...",Se="-- No data pools available --",Re="This field is required.",G="This field is required.",Ge="You have to increase the size.",ye="Size must be a number or in a valid format. eg: 5 GiB",xe="You need to enable a " + "\ufffd#3\ufffd" + "mirror mode" + "\ufffd/#3\ufffd" + " in the selected pool. Please " + "\ufffd#4\ufffd" + "click here to select a mode and enable it in this pool." + "\ufffd/#4\ufffd" + "",Ze="You need to enable " + "\ufffd#3\ufffd" + "image mirror mode" + "\ufffd/#3\ufffd" + " in the selected pool. Please " + "\ufffd#4\ufffd" + "click here to select a mode and enable it in this pool." + "\ufffd/#4\ufffd" + "",we="Create Mirror-Snapshots automatically on a periodic basis. The interval can be specified in days, hours, or minutes using d, h, m suffix respectively. 
To create mirror snapshots, you must import or create and have available peers to mirror",He="Schedule Interval " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + "",ke="e.g., 12h or 1d or 10m",Ke="Advanced...",qe="This field is required because stripe count is defined!",Xe="Stripe unit is greater than object size.",Qe="This field is required because stripe unit is defined!",ze="Stripe count must be greater than 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","rbdForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],s,[1,"card-body"],["class","form-group row",4,"ngIf"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Name...","id","name","name","name","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"form-group","row",3,"change"],["for","pool",1,"cd-col-form-label",3,"ngClass"],o,["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-select","formControlName","pool",3,"change",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","id","useDataPool","name","useDataPool","formControlName","useDataPool",1,"custom-control-input",3,"change"],["for","useDataPool",1,"custom-control-label"],i,[4,"ngIf"],["for","size",1,"cd-col-form-label","required"],_,["id","size","name","size","type","text","formControlName","size","placeholder",a,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["formGroupName","features",1,"form-group","row"],["for","features",1,"cd-col-form-label"],l,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],["type","checkbox","id","mirroring","name","mirroring","formControlName","mirroring",1,"custom-control-input",3,"change"],["for","mirroring",1,"custom-control-label"],[1,"row"],[1,"col-sm-12"],["class","float-end margin-right-md","href","",3,"click",4,"ngIf"],[3,"hidden"],[1,"cd-header"],d,[1,"col-md-12"],g,["for","size",1,"cd-col-form-label"],S,["id","obj_size","name","obj_size","formControlName","obj_size",1,"form-select"],[3,"value",4,"ngFor","ngForOf"],["for","stripingUnit",1,"cd-col-form-label",3,"ngClass"],I,["id","stripingUnit","name","stripingUnit","formControlName","stripingUnit",1,"form-select"],[3,"ngValue"],P,["for","stripingCount",1,"cd-col-form-label",3,"ngClass"],$,["id","stripingCount","name","stripingCount","formControlName","stripingCount","type","number",1,"form-control"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","name",1,"cd-col-form-label"],y,["type","text","id","parent","name","parent","formControlName","parent",1,"form-control"],[1,"invalid-feedback"],Q,Y,["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-select",3,"change"],[3,"ngValue",4,"ngIf"],ee,te,w,[3,"value"],_e,[3,"ngClass"],["for","pool",1,"cd-col-form-label"],["class","form-control","type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",4,"ngIf"],["id","namespace","name","namespace","class","form-select","formControlName","namespace",4,"ngIf"],["type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",1,"form-control"],["id","namespace","name","namespace","formControlName","namespace",1,"form-select"],ae,O,me,ge,["for","dataPool",1,"cd-col-form-label"],Te,["html",fe],["class","form-control","type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",4,"ngIf"],["id","dataPool","name","dataPool","class","form-select","formControlName","dataPool",3,"change",4,"ngIf"],["type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",1,"form-control"],["id","dataPool","name","dataPool","formControlName","dataPool",1,"form-select",3,"change"],Ce,Se,Re,G,Ge,ye,["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],[3,"html",4,"ngIf"],[3,"html"],xe,[3,"routerLink"],["class","custom-control custom-radio ms-2",4,"ngFor","ngForOf"],[1,"custom-control","custom-radio","ms-2"],["type","radio","name","mirroringMode","formControlName","mirroringMode",1,"form-check-input",3,"id","value","change"],[1,"form-check-label",3,"for"],Ze,[1,"cd-col-form-label"],He,["html",we],["id","schedule","name","schedule","type","text","formControlName","schedule","placeholder",ke,1,"form-control"],["href","",1,"float-end","margin-right-md",3,"click"],Ke,qe,Xe,Qe,ze]},template:function(t,o){1&t&&e.YNc(0,p_,96,50,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},dependencies:[c.mk,c.sg,c.O5,r._Y,r.YN,r.Kr,r.Fj,r.wV,r.Wl,r.EJ,r._,r.JJ,r.JL,r.sg,r.u,r.x0,bt.S,A.p,Oe.U,Ss.Q,ft.y,M.o,B.b,K.P,J.V,m.rH,Rs.d,c.rS,ot.m]}),n})();var Lt=p(71225),ct=p(36169),u_=p(72427),dt=p(51847),m_=p(16738),he=p.n(m_),pt=p(62862),g_=p(52266);function T_(n,s){1&n&&(e.TgZ(0,"div",18)(1,"span"),e.SDv(2,19),e.qZA()())}function f_(n,s){1&n&&(e.TgZ(0,"span",20),e.SDv(1,21),e.qZA())}function C_(n,s){1&n&&(e.TgZ(0,"span",20),e.SDv(1,22),e.qZA())}function S_(n,s){if(1&n&&e._UZ(0,"cd-date-time-picker",23),2&n){const t=e.oxw();e.Q6J("control",t.moveForm.get("expiresAt"))}}let R_=(()=>{class n{constructor(t,o,i,_,a){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=_,this.taskWrapper=a,this.createForm()}createForm(){this.moveForm=this.fb.group({expiresAt:["",[z.h.custom("format",t=>!(""===t||he()(t,"YYYY-MM-DD HH:mm:ss").isValid())),z.h.custom("expired",t=>he()().isAfter(t))]]})}ngOnInit(){this.imageSpec=new v.N(this.poolName,this.namespace,this.imageName),this.imageSpecStr=this.imageSpec.toString(),this.pattern=`${this.poolName}/${this.imageName}`}moveImage(){let t=0;const o=this.moveForm.getValue("expiresAt");o&&(t=he()(o,"YYYY-MM-DD HH:mm:ss").diff(he()(),"seconds",!0)),t<0&&(t=0),this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/trash/move",{image_spec:this.imageSpecStr}),call:this.rbdService.moveTrash(this.imageSpec,t)}).subscribe({complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(H),e.Y36(F.Kz),e.Y36(L.p4),e.Y36(pt.O),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-move-modal"]],decls:23,vars:9,consts:function(){let s,t,o,i,_,a,l;return s="Move an image to trash",t="To move " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " to trash, click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Move" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ". Optionally, you can pick an expiration date.",t=e.Zx4(t),o="Protection expires at",i="NOT PROTECTED",_="This image contains snapshot(s), which will prevent it from being removed after moved to trash.",a="Wrong date format. Please use \"YYYY-MM-DD HH:mm:ss\".",l="Protection has already expired. Please pick a future date or leave it empty.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","moveForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],["class","alert alert-warning","role","alert",4,"ngIf"],t,[1,"form-group"],["for","expiresAt",1,"col-form-label"],o,["type","text","placeholder",i,"formControlName","expiresAt","triggers","manual",1,"form-control",3,"ngbPopover","click","keypress"],["p","ngbPopover"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["popContent",""],["role","alert",1,"alert","alert-warning"],_,[1,"invalid-feedback"],a,l,[3,"control"]]},template:function(t,o){if(1&t){const i=e.EpF();e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6),e.YNc(7,T_,3,0,"div",7),e.TgZ(8,"p"),e.tHW(9,8),e._UZ(10,"kbd")(11,"kbd"),e.N_p(),e.qZA(),e.TgZ(12,"div",9)(13,"label",10),e.SDv(14,11),e.qZA(),e.TgZ(15,"input",12,13),e.NdJ("click",function(){e.CHM(i);const a=e.MAs(16);return e.KtG(a.open())})("keypress",function(){e.CHM(i);const a=e.MAs(16);return e.KtG(a.close())}),e.qZA(),e.YNc(17,f_,2,0,"span",14),e.YNc(18,C_,2,0,"span",14),e.qZA()(),e.TgZ(19,"div",15)(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return o.moveImage()}),e.qZA()()(),e.BQk(),e.qZA(),e.YNc(21,S_,1,1,"ng-template",null,17,e.W1O)}if(2&t){const i=e.MAs(5),_=e.MAs(22);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.moveForm),e.xp6(3),e.Q6J("ngIf",o.hasSnapshots),e.xp6(4),e.pQV(o.imageSpecStr),e.QtT(9),e.xp6(4),e.Q6J("ngbPopover",_),e.xp6(2),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"format")),e.xp6(1),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"expired")),e.xp6(2),e.Q6J("form",o.moveForm)("submitText",o.actionLabels.MOVE)}},dependencies:[c.O5,r._Y,r.Fj,r.JJ,r.JL,r.sg,r.u,F.o8,f.z,g_.J,A.p,M.o,B.b,K.P,J.V]}),n})();var E_=p(60251),vt=p(76317),M_=p(25917),$t=p(51295),ut=p(60737),O_=p(74255),Bt=p(71099),Gt=p(79765);function h_(n,s){1&n&&(e.TgZ(0,"span",16),e.SDv(1,17),e.qZA())}function A_(n,s){if(1&n&&(e.TgZ(0,"span"),e.tHW(1,18),e._UZ(2,"b"),e.N_p(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.imageName),e.QtT(1)}}function P_(n,s){1&n&&(e.TgZ(0,"cd-helper"),e.SDv(1,25),e.qZA())}function b_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",7)(1,"div",20)(2,"div",21)(3,"input",22),e.NdJ("change",function(){e.CHM(t);const i=e.oxw(2);return e.KtG(i.onMirrorCheckBoxChange())}),e.qZA(),e.TgZ(4,"label",23),e.SDv(5,24),e.qZA(),e.YNc(6,P_,2,0,"cd-helper",13),e.qZA()()()}if(2&n){const t=s.ngIf;e.xp6(3),e.uIk("disabled",!(t.length>0)||null),e.xp6(3),e.Q6J("ngIf",!t.length>0)}}function I_(n,s){if(1&n&&(e.ynx(0),e.YNc(1,b_,7,2,"div",19),e.ALo(2,"async"),e.BQk()),2&n){const 
t=e.oxw();e.xp6(1),e.Q6J("ngIf",e.lcZ(2,1,t.peerConfigured$))}}let N_=(()=>{class n{constructor(t,o,i,_,a,l){this.activeModal=t,this.rbdService=o,this.taskManagerService=i,this.notificationService=_,this.actionLabels=a,this.rbdMirrorService=l,this.editing=!1,this.onSubmit=new Gt.xQ,this.action=this.actionLabels.CREATE,this.resource="RBD Snapshot",this.createForm()}createForm(){this.snapshotForm=new Z.d({snapshotName:new r.p4("",{validators:[r.kI.required]}),mirrorImageSnapshot:new r.p4(!1,{})})}ngOnInit(){this.peerConfigured$=this.rbdMirrorService.getPeerForPool(this.poolName)}setSnapName(t){this.snapName=t,this.snapshotForm.get("snapshotName").setValue(t)}onMirrorCheckBoxChange(){!0===this.snapshotForm.getValue("mirrorImageSnapshot")?(this.snapshotForm.get("snapshotName").setValue(""),this.snapshotForm.get("snapshotName").clearValidators()):(this.snapshotForm.get("snapshotName").setValue(this.snapName),this.snapshotForm.get("snapshotName").setValidators([r.kI.required]),this.snapshotForm.get("snapshotName").updateValueAndValidity())}setEditing(t=!0){this.editing=t,this.action=this.editing?this.actionLabels.RENAME:this.actionLabels.CREATE}editAction(){const t=this.snapshotForm.getValue("snapshotName"),o=new v.N(this.poolName,this.namespace,this.imageName),i=new E.R;i.name="rbd/snap/edit",i.metadata={image_spec:o.toString(),snapshot_name:t},this.rbdService.renameSnapshot(o,this.snapName,t).toPromise().then(()=>{this.taskManagerService.subscribe(i.name,i.metadata,_=>{this.notificationService.notifyTask(_)}),this.activeModal.close(),this.onSubmit.next(this.snapName)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}createAction(){const t=this.snapshotForm.getValue("snapshotName"),o=this.snapshotForm.getValue("mirrorImageSnapshot"),i=new v.N(this.poolName,this.namespace,this.imageName),_=new E.R;_.name="rbd/snap/create",_.metadata={image_spec:i.toString(),snapshot_name:t},this.rbdService.createSnapshot(i,t,o).toPromise().then(()=>{this.taskManagerService.subscribe(_.name,_.metadata,a=>{this.notificationService.notifyTask(a)}),this.activeModal.close(),this.onSubmit.next(t)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}submit(){this.editing?this.editAction():this.createAction()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(H),e.Y36(Bt.k),e.Y36(ve.g),e.Y36(L.p4),e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-form-modal"]],decls:21,vars:18,consts:function(){let s,t,o,i,_,a;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="This field is required.",i="Snapshot mode is enabled on image " + "\ufffd#2\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#2\ufffd" + ": snapshot names are auto generated",_="Mirror Image Snapshot",a="The peer must be registered to do this action.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","snapshotForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","snapshotName",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Snapshot name...","id","snapshotName","name","snapshotName","formControlName","snapshotName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],o,i,["class","form-group 
row",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","formControlName","mirrorImageSnapshot","name","mirrorImageSnapshot","id","mirrorImageSnapshot",1,"custom-control-input",3,"change"],["for","mirrorImageSnapshot",1,"custom-control-label"],_,a]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,h_,2,0,"span",12),e.YNc(15,A_,3,1,"span",13),e.qZA()(),e.YNc(16,I_,3,3,"ng-container",13),e.qZA(),e.TgZ(17,"div",14)(18,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(19,"titlecase"),e.ALo(20,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,10,o.action))(e.lcZ(4,12,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.snapshotForm),e.xp6(7),e.uIk("disabled","snapshot"===o.mirroring&&!0===o.snapshotForm.getValue("mirrorImageSnapshot")||null),e.xp6(1),e.Q6J("ngIf",o.snapshotForm.showError("snapshotName",i,"required")),e.xp6(1),e.Q6J("ngIf","snapshot"===o.mirroring&&!0===o.snapshotForm.getValue("mirrorImageSnapshot")||null),e.xp6(1),e.Q6J("ngIf","snapshot"===o.mirroring||null),e.xp6(2),e.Q6J("form",o.snapshotForm)("submitText",e.lcZ(19,14,o.action)+" "+e.lcZ(20,16,o.resource))}},dependencies:[c.O5,r._Y,r.Fj,r.Wl,r.JJ,r.JL,r.sg,r.u,bt.S,f.z,A.p,Oe.U,M.o,B.b,K.P,J.V,c.Ov,c.rS,ot.m]}),n})();class F_{constructor(s,t,o){this.featuresName=t,this.cloneFormatVersion=1,o.cloneFormatVersion().subscribe(i=>{this.cloneFormatVersion=i}),this.create={permission:"create",icon:T.P.add,name:s.CREATE},this.rename={permission:"update",icon:T.P.edit,name:s.RENAME,disable:i=>this.disableForMirrorSnapshot(i)||!i.hasSingleSelection},this.protect={permission:"update",icon:T.P.lock,visible:i=>i.hasSingleSelection&&!i.first().is_protected,name:s.PROTECT,disable:i=>this.disableForMirrorSnapshot(i)||this.getProtectDisableDesc(i,this.featuresName)},this.unprotect={permission:"update",icon:T.P.unlock,visible:i=>i.hasSingleSelection&&i.first().is_protected,name:s.UNPROTECT,disable:i=>this.disableForMirrorSnapshot(i)},this.clone={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>this.getCloneDisableDesc(i)||this.disableForMirrorSnapshot(i),icon:T.P.clone,name:s.CLONE},this.copy={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>!i.hasSingleSelection||i.first().cdExecuting||this.disableForMirrorSnapshot(i),icon:T.P.copy,name:s.COPY},this.rollback={permission:"update",icon:T.P.undo,name:s.ROLLBACK,disable:i=>this.disableForMirrorSnapshot(i)||!i.hasSingleSelection},this.deleteSnap={permission:"delete",icon:T.P.destroy,disable:i=>{const _=i.first();return!i.hasSingleSelection||_.cdExecuting||_.is_protected||this.disableForMirrorSnapshot(i)},name:s.DELETE},this.ordering=[this.create,this.rename,this.protect,this.unprotect,this.clone,this.copy,this.rollback,this.deleteSnap]}getProtectDisableDesc(s,t){return!(s.hasSingleSelection&&!s.first().cdExecuting)||!t?.includes("layering")&&"The layering feature needs to be enabled on parent image"}getCloneDisableDesc(s){return!(s.hasSingleSelection&&!s.first().cdExecuting)||1===this.cloneFormatVersion&&!s.first().is_protected&&"Snapshot must be protected in order to clone."}disableForMirrorSnapshot(s){return 
s.hasSingleSelection&&"snapshot"===s.first().mirror_mode&&s.first().name.includes(".mirror.")}}class D_{}var Be=p(96102);const L_=["nameTpl"],v_=["rollbackTpl"];function $_(n,s){if(1&n&&(e.ynx(0),e.SDv(1,3),e.BQk(),e.TgZ(2,"strong"),e._uU(3),e.qZA(),e._uU(4,".\n")),2&n){const t=s.$implicit;e.xp6(3),e.hij(" ",t.snapName,"")}}let B_=(()=>{class n{constructor(t,o,i,_,a,l,d,g,S,I,P){this.authStorageService=t,this.modalService=o,this.dimlessBinaryPipe=i,this.cdDatePipe=_,this.rbdService=a,this.taskManagerService=l,this.notificationService=d,this.summaryService=g,this.taskListService=S,this.actionLabels=I,this.cdr=P,this.snapshots=[],this.selection=new Ee.r,this.builders={"rbd/snap/create":$=>{const y=new D_;return y.name=$.snapshot_name,y}},this.permission=this.authStorageService.getPermissions().rbdImage}ngOnInit(){this.columns=[{name:"Name",prop:"name",cellTransformation:Le.e.executing,flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"Used",prop:"disk_usage",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"State",prop:"is_protected",flexGrow:1,cellTransformation:Le.e.badge,customTemplateConfig:{map:{true:{value:"PROTECTED",class:"badge-success"},false:{value:"UNPROTECTED",class:"badge-info"}}}},{name:"Created",prop:"timestamp",flexGrow:1,pipe:this.cdDatePipe}],this.imageSpec=new v.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions=new F_(this.actionLabels,this.featuresName,this.rbdService),this.rbdTableActions.create.click=()=>this.openCreateSnapshotModal(),this.rbdTableActions.rename.click=()=>this.openEditSnapshotModal(),this.rbdTableActions.protect.click=()=>this.toggleProtection(),this.rbdTableActions.unprotect.click=()=>this.toggleProtection();const t=()=>this.selection.first()&&`${this.imageSpec.toStringEncoded()}/${encodeURIComponent(this.selection.first().name)}`;this.rbdTableActions.clone.routerLink=()=>`/block/rbd/clone/${t()}`,this.rbdTableActions.copy.routerLink=()=>`/block/rbd/copy/${t()}`,this.rbdTableActions.rollback.click=()=>this.rollbackModal(),this.rbdTableActions.deleteSnap.click=()=>this.deleteSnapshotModal(),this.tableActions=this.rbdTableActions.ordering,this.taskListService.init(()=>(0,M_.of)(this.snapshots),null,_=>{$t.T.updateChanged(this,{data:_})&&(this.cdr.detectChanges(),this.data=[...this.data])},()=>{$t.T.updateChanged(this,{data:this.snapshots})&&(this.cdr.detectChanges(),this.data=[...this.data])},_=>["rbd/snap/create","rbd/snap/delete","rbd/snap/edit","rbd/snap/rollback"].includes(_.name)&&this.imageSpec.toString()===_.metadata.image_spec,(_,a)=>_.name===a.metadata.snapshot_name,this.builders)}ngOnChanges(){this.columns&&(this.imageSpec=new v.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions&&(this.rbdTableActions.featuresName=this.featuresName),this.taskListService.fetch())}openSnapshotModal(t,o=null){this.modalRef=this.modalService.show(N_,{mirroring:this.mirroring}),this.modalRef.componentInstance.poolName=this.poolName,this.modalRef.componentInstance.imageName=this.rbdName,this.modalRef.componentInstance.namespace=this.namespace,o?this.modalRef.componentInstance.setEditing():o=`${this.rbdName}_${he()().toISOString(!0)}`,this.modalRef.componentInstance.setSnapName(o),this.modalRef.componentInstance.onSubmit.subscribe(_=>{const a=new 
ut.o;a.name=t,a.metadata={image_spec:this.imageSpec.toString(),snapshot_name:_},this.summaryService.addRunningTask(a)})}openCreateSnapshotModal(){this.openSnapshotModal("rbd/snap/create")}openEditSnapshotModal(){this.openSnapshotModal("rbd/snap/edit",this.selection.first().name)}toggleProtection(){const t=this.selection.first().name,o=this.selection.first().is_protected,i=new E.R;i.name="rbd/snap/edit";const _=new v.N(this.poolName,this.namespace,this.rbdName);i.metadata={image_spec:_.toString(),snapshot_name:t},this.rbdService.protectSnapshot(_,t,!o).toPromise().then(()=>{const a=new ut.o;a.name=i.name,a.metadata=i.metadata,this.summaryService.addRunningTask(a),this.taskManagerService.subscribe(i.name,i.metadata,l=>{this.notificationService.notifyTask(l)})})}_asyncTask(t,o,i){const _=new E.R;_.name=o,_.metadata={image_spec:new v.N(this.poolName,this.namespace,this.rbdName).toString(),snapshot_name:i};const a=new v.N(this.poolName,this.namespace,this.rbdName);this.rbdService[t](a,i).toPromise().then(()=>{const l=new ut.o;l.name=_.name,l.metadata=_.metadata,this.summaryService.addRunningTask(l),this.modalRef.close(),this.taskManagerService.subscribe(l.name,l.metadata,d=>{this.notificationService.notifyTask(d)})}).catch(()=>{this.modalRef.componentInstance.stopLoadingSpinner()})}rollbackModal(){const t=this.selection.selected[0].name,o=new v.N(this.poolName,this.namespace,this.rbdName).toString(),i={titleText:"RBD snapshot rollback",buttonText:"Rollback",bodyTpl:this.rollbackTpl,bodyData:{snapName:`${o}@${t}`},onSubmit:()=>{this._asyncTask("rollbackSnapshot","rbd/snap/rollback",t)}};this.modalRef=this.modalService.show(ct.Y,i)}deleteSnapshotModal(){const t=this.selection.selected[0].name;this.modalRef=this.modalService.show(ue.M,{itemDescription:"RBD snapshot",itemNames:[t],submitAction:()=>this._asyncTask("deleteSnapshot","rbd/snap/delete",t)})}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(pe.Z),e.Y36(Ue.$),e.Y36(Be.N),e.Y36(H),e.Y36(Bt.k),e.Y36(ve.g),e.Y36(O_.J),e.Y36(de.j),e.Y36(L.p4),e.Y36(e.sBO))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(L_,5),e.Gf(v_,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.rollbackTpl=i.first)}},inputs:{snapshots:"snapshots",featuresName:"featuresName",poolName:"poolName",namespace:"namespace",mirroring:"mirroring",primary:"primary",rbdName:"rbdName"},features:[e._Bn([de.j]),e.TTD],decls:4,vars:5,consts:function(){let s;return s="You are about to rollback",[["columnMode","flex","selectionType","single",3,"data","columns","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["rollbackTpl",""],s]},template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,$_,5,1,"ng-template",null,2,e.W1O)),2&t&&(e.Q6J("data",o.data)("columns",o.columns),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},dependencies:[W.a,Me.K],changeDetection:0}),n})();var G_=p(42176),y_=p(41039);const x_=["poolConfigurationSourceTpl"];function Z_(n,s){1&n&&(e.ynx(0),e.tHW(1,3),e._UZ(2,"strong"),e.N_p(),e.BQk())}function w_(n,s){if(1&n&&(e.TgZ(0,"span")(1,"span",38),e._uU(2),e.qZA()()),2&n){const t=s.$implicit;e.xp6(2),e.Oqu(t)}}function H_(n,s){if(1&n&&(e.TgZ(0,"span")(1,"span",39),e.SDv(2,40),e.qZA()()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function 
k_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.disk_usage)," ")}}function K_(n,s){if(1&n&&(e.TgZ(0,"span")(1,"span",39),e.SDv(2,41),e.qZA()()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function q_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.total_disk_usage)," ")}}function X_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(4);e.xp6(1),e.hij("/",t.selection.parent.pool_namespace,"")}}function Q_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,X_,2,1,"span",1),e._uU(3),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Oqu(t.selection.parent.pool_name),e.xp6(1),e.Q6J("ngIf",t.selection.parent.pool_namespace),e.xp6(1),e.AsE("/",t.selection.parent.image_name,"@",t.selection.parent.snap_name,"")}}function z_(n,s){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function J_(n,s){if(1&n&&(e.TgZ(0,"table",17)(1,"tbody")(2,"tr")(3,"td",18),e.SDv(4,19),e.qZA(),e.TgZ(5,"td",20),e._uU(6),e.qZA()(),e.TgZ(7,"tr")(8,"td",21),e.SDv(9,22),e.qZA(),e.TgZ(10,"td"),e._uU(11),e.qZA()(),e.TgZ(12,"tr")(13,"td",21),e.SDv(14,23),e.qZA(),e.TgZ(15,"td"),e._uU(16),e.ALo(17,"empty"),e.qZA()(),e.TgZ(18,"tr")(19,"td",21),e.SDv(20,24),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.ALo(23,"cdDate"),e.qZA()(),e.TgZ(24,"tr")(25,"td",21),e.SDv(26,25),e.qZA(),e.TgZ(27,"td"),e._uU(28),e.ALo(29,"dimlessBinary"),e.qZA()(),e.TgZ(30,"tr")(31,"td",21),e.SDv(32,26),e.qZA(),e.TgZ(33,"td"),e._uU(34),e.ALo(35,"dimless"),e.qZA()(),e.TgZ(36,"tr")(37,"td",21),e.SDv(38,27),e.qZA(),e.TgZ(39,"td"),e._uU(40),e.ALo(41,"dimlessBinary"),e.qZA()(),e.TgZ(42,"tr")(43,"td",21),e.SDv(44,28),e.qZA(),e.TgZ(45,"td"),e.YNc(46,w_,3,1,"span",29),e.qZA()(),e.TgZ(47,"tr")(48,"td",21),e.SDv(49,30),e.qZA(),e.TgZ(50,"td"),e.YNc(51,H_,3,1,"span",1),e.YNc(52,k_,3,3,"span",1),e.qZA()(),e.TgZ(53,"tr")(54,"td",21),e.SDv(55,31),e.qZA(),e.TgZ(56,"td"),e.YNc(57,K_,3,1,"span",1),e.YNc(58,q_,3,3,"span",1),e.qZA()(),e.TgZ(59,"tr")(60,"td",21),e.SDv(61,32),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.ALo(64,"dimlessBinary"),e.qZA()(),e.TgZ(65,"tr")(66,"td",21),e.SDv(67,33),e.qZA(),e.TgZ(68,"td"),e._uU(69),e.qZA()(),e.TgZ(70,"tr")(71,"td",21),e.SDv(72,34),e.qZA(),e.TgZ(73,"td"),e.YNc(74,Q_,4,4,"span",1),e.YNc(75,z_,2,0,"span",1),e.qZA()(),e.TgZ(76,"tr")(77,"td",21),e.SDv(78,35),e.qZA(),e.TgZ(79,"td"),e._uU(80),e.qZA()(),e.TgZ(81,"tr")(82,"td",21),e.SDv(83,36),e.qZA(),e.TgZ(84,"td"),e._uU(85),e.qZA()(),e.TgZ(86,"tr")(87,"td",21),e.SDv(88,37),e.qZA(),e.TgZ(89,"td"),e._uU(90),e.qZA()()()()),2&n){const 
t=e.oxw(2);e.xp6(6),e.Oqu(t.selection.name),e.xp6(5),e.Oqu(t.selection.pool_name),e.xp6(5),e.Oqu(e.lcZ(17,19,t.selection.data_pool)),e.xp6(6),e.Oqu(e.lcZ(23,21,t.selection.timestamp)),e.xp6(6),e.Oqu(e.lcZ(29,23,t.selection.size)),e.xp6(6),e.Oqu(e.lcZ(35,25,t.selection.num_objs)),e.xp6(6),e.Oqu(e.lcZ(41,27,t.selection.obj_size)),e.xp6(6),e.Q6J("ngForOf",t.selection.features_name),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Oqu(e.lcZ(64,29,t.selection.stripe_unit)),e.xp6(6),e.Oqu(t.selection.stripe_count),e.xp6(5),e.Q6J("ngIf",t.selection.parent),e.xp6(1),e.Q6J("ngIf",!t.selection.parent),e.xp6(5),e.Oqu(t.selection.block_name_prefix),e.xp6(5),e.Oqu(t.selection.order),e.xp6(5),e.Oqu(t.selection.image_format)}}function Y_(n,s){if(1&n&&e._UZ(0,"cd-rbd-snapshot-list",42),2&n){const t=e.oxw(2);e.Q6J("snapshots",t.selection.snapshots)("featuresName",t.selection.features_name)("poolName",t.selection.pool_name)("primary",t.selection.primary)("namespace",t.selection.namespace)("mirroring",t.selection.mirror_mode)("rbdName",t.selection.name)}}function V_(n,s){if(1&n&&e._UZ(0,"cd-rbd-configuration-table",43),2&n){const t=e.oxw(2);e.Q6J("data",t.selection.configuration)}}function U_(n,s){if(1&n&&e._UZ(0,"cd-grafana",44),2&n){const t=e.oxw(2);e.Q6J("grafanaPath",t.rbdDashboardUrl)("type","metrics")}}function j_(n,s){if(1&n&&(e.ynx(0),e.TgZ(1,"nav",4,5),e.ynx(3,6),e.TgZ(4,"a",7),e.SDv(5,8),e.qZA(),e.YNc(6,J_,91,31,"ng-template",9),e.BQk(),e.ynx(7,10),e.TgZ(8,"a",7),e.SDv(9,11),e.qZA(),e.YNc(10,Y_,1,7,"ng-template",9),e.BQk(),e.ynx(11,12),e.TgZ(12,"a",7),e.SDv(13,13),e.qZA(),e.YNc(14,V_,1,1,"ng-template",9),e.BQk(),e.ynx(15,14),e.TgZ(16,"a",7),e.SDv(17,15),e.qZA(),e.YNc(18,U_,1,2,"ng-template",9),e.BQk(),e.qZA(),e._UZ(19,"div",16),e.BQk()),2&n){const t=e.MAs(2);e.xp6(19),e.Q6J("ngbNavOutlet",t)}}function W_(n,s){1&n&&(e.ynx(0),e.TgZ(1,"cd-alert-panel",45),e.SDv(2,46),e.qZA(),e.BQk())}function ea(n,s){1&n&&(e.ynx(0),e.TgZ(1,"strong",49),e.SDv(2,50),e.qZA(),e.BQk())}function ta(n,s){1&n&&(e.TgZ(0,"span",51),e.SDv(1,52),e.qZA())}function oa(n,s){if(1&n&&(e.YNc(0,ea,3,0,"ng-container",47),e.YNc(1,ta,2,0,"ng-template",null,48,e.W1O)),2&n){const t=s.value,o=e.MAs(2);e.Q6J("ngIf",+t)("ngIfElse",o)}}let na=(()=>{class n{ngOnChanges(){this.selection&&(this.rbdDashboardUrl=`rbd-details?var-Pool=${this.selection.pool_name}&var-Image=${this.selection.name}`)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(x_,7),e.Gf(F.Pz,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.poolConfigurationSourceTpl=i.first),e.iGM(i=e.CRH())&&(o.nav=i.first)}},inputs:{selection:"selection",images:"images"},features:[e.TTD],decls:6,vars:2,consts:function(){let s,t,o,i,_,a,l,d,g,S,I,P,$,y,Q,Y,ee,te,w,_e,ae,O,me,ge,Te,fe,Ce,Se,Re;return s="Only available for RBD images with " + "\ufffd#2\ufffd" + "fast-diff" + "\ufffd/#2\ufffd" + " enabled",t="Details",o="Snapshots",i="Configuration",_="Performance",a="Name",l="Pool",d="Data Pool",g="Created",S="Size",I="Objects",P="Object size",$="Features",y="Provisioned",Q="Total provisioned",Y="Striping 
unit",ee="Striping count",te="Parent",w="Block name prefix",_e="Order",ae="Format Version",O="N/A",me="N/A",ge="RBD details",Te="Information can not be displayed for RBD in status 'Removing'.",fe="This setting overrides the global value",Ce="Image",Se="This is the global value. No value for this option has been set for this image.",Re="Global",[["usageNotAvailableTooltipTpl",""],[4,"ngIf"],["poolConfigurationSourceTpl",""],s,["ngbNav","","cdStatefulTab","rbd-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],t,["ngbNavContent",""],["ngbNavItem","snapshots"],o,["ngbNavItem","configuration"],i,["ngbNavItem","performance"],_,[3,"ngbNavOutlet"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],a,[1,"w-75"],[1,"bold"],l,d,g,S,I,P,$,[4,"ngFor","ngForOf"],y,Q,Y,ee,te,w,_e,ae,[1,"badge","badge-dark","me-2"],["placement","top",1,"form-text","text-muted",3,"ngbTooltip"],O,me,[3,"snapshots","featuresName","poolName","primary","namespace","mirroring","rbdName"],[3,"data"],["title",ge,"uid","YhCYGcuZz","grafanaStyle","one",3,"grafanaPath","type"],["type","warning"],Te,[4,"ngIf","ngIfElse"],["global",""],["ngbTooltip",fe],Ce,["ngbTooltip",Se],Re]},template:function(t,o){1&t&&(e.YNc(0,Z_,3,0,"ng-template",null,0,e.W1O),e.YNc(2,j_,20,1,"ng-container",1),e.YNc(3,W_,3,0,"ng-container",1),e.YNc(4,oa,3,2,"ng-template",null,2,e.W1O)),2&t&&(e.xp6(2),e.Q6J("ngIf",o.selection&&"REMOVING"!==o.selection.source),e.xp6(1),e.Q6J("ngIf",o.selection&&"REMOVING"===o.selection.source))},dependencies:[c.sg,c.O5,F.uN,F.Pz,F.nv,F.Vx,F.tO,F.Dy,F._L,vt.F,it.G,ht.m,B_,G_.P,Ue.$,st.n,Be.N,y_.W]}),n})();const et=function(){return{exact:!0}};function ia(n,s){1&n&&(e.TgZ(0,"li",1)(1,"a",9),e.SDv(2,10),e.qZA()()),2&n&&(e.xp6(1),e.Q6J("routerLinkActiveOptions",e.DdM(1,et)))}let tt=(()=>{class n{constructor(t){this.authStorageService=t,this.grafanaPermission=this.authStorageService.getPermissions().grafana}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-tabs"]],decls:11,vars:7,consts:function(){let s,t,o,i;return s="Images",t="Namespaces",o="Trash",i="Overall Performance",[[1,"nav","nav-tabs"],[1,"nav-item"],["routerLink","/block/rbd","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],s,["routerLink","/block/rbd/namespaces","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],t,["routerLink","/block/rbd/trash","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],o,["class","nav-item",4,"ngIf"],["routerLink","/block/rbd/performance","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],i]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0)(1,"li",1)(2,"a",2),e.SDv(3,3),e.qZA()(),e.TgZ(4,"li",1)(5,"a",4),e.SDv(6,5),e.qZA()(),e.TgZ(7,"li",1)(8,"a",6),e.SDv(9,7),e.qZA()(),e.YNc(10,ia,3,2,"li",8),e.qZA()),2&t&&(e.xp6(2),e.Q6J("routerLinkActiveOptions",e.DdM(4,et)),e.xp6(3),e.Q6J("routerLinkActiveOptions",e.DdM(5,et)),e.xp6(3),e.Q6J("routerLinkActiveOptions",e.DdM(6,et)),e.xp6(2),e.Q6J("ngIf",o.grafanaPermission.read))},dependencies:[c.O5,m.rH,m.Od]}),n})();const sa=["usageTpl"],_a=["parentTpl"],aa=["nameTpl"],ra=["ScheduleTpl"],la=["mirroringTpl"],ca=["flattenTpl"],da=["deleteTpl"],pa=["removingStatTpl"],ua=["forcePromoteConfirmation"],ma=["usedTmpl"],ga=["totalUsedTmpl"],Ta=["imageUsageTpl"];function 
fa(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(2).value;e.xp6(1),e.hij("/",t.pool_namespace,"")}}function Ca(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,fa,2,1,"span",13),e._uU(3),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t.pool_name),e.xp6(1),e.Q6J("ngIf",t.pool_namespace),e.xp6(1),e.AsE("/",t.image_name,"@",t.snap_name,"")}}function Sa(n,s){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function Ra(n,s){if(1&n&&(e.YNc(0,Ca,4,4,"span",13),e.YNc(1,Sa,2,0,"span",13)),2&n){const t=s.value;e.Q6J("ngIf",t),e.xp6(1),e.Q6J("ngIf",!t)}}function Ea(n,s){if(1&n&&(e.TgZ(0,"span",17),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t[0])}}function Ma(n,s){if(1&n&&(e.TgZ(0,"span",17),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t[1])}}function Oa(n,s){1&n&&(e.TgZ(0,"span",17),e.SDv(1,18),e.qZA())}function ha(n,s){1&n&&(e.TgZ(0,"span",17),e.SDv(1,19),e.qZA())}function Aa(n,s){if(1&n&&(e.TgZ(0,"span",17),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Pa(n,s){if(1&n&&(e.YNc(0,Ea,2,1,"span",14),e._uU(1,"\xa0 "),e.YNc(2,Ma,2,1,"span",15),e._uU(3,"\xa0 "),e.YNc(4,Oa,2,0,"span",15),e.YNc(5,ha,2,0,"span",15),e.YNc(6,Aa,2,1,"ng-template",null,16,e.W1O)),2&n){const t=s.value,o=s.row,i=e.MAs(7);e.Q6J("ngIf",3===t.length)("ngIfElse",i),e.xp6(2),e.Q6J("ngIf",3===t.length),e.xp6(2),e.Q6J("ngIf",!0===o.primary),e.xp6(1),e.Q6J("ngIf",!1===o.primary)}}function ba(n,s){if(1&n&&(e.TgZ(0,"span",17),e._uU(1),e.ALo(2,"cdDate"),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(e.lcZ(2,1,t[2]))}}function Ia(n,s){1&n&&e.YNc(0,ba,3,3,"span",15),2&n&&e.Q6J("ngIf",3===s.value.length)}function Na(n,s){if(1&n&&(e._uU(0," You are about to flatten "),e.TgZ(1,"strong"),e._uU(2),e.qZA(),e._uU(3,". "),e._UZ(4,"br")(5,"br"),e._uU(6," All blocks will be copied from parent "),e.TgZ(7,"strong"),e._uU(8),e.qZA(),e._uU(9," to child "),e.TgZ(10,"strong"),e._uU(11),e.qZA(),e._uU(12,".\n")),2&n){const t=s.$implicit;e.xp6(2),e.Oqu(t.child),e.xp6(6),e.Oqu(t.parent),e.xp6(3),e.Oqu(t.child)}}function Fa(n,s){if(1&n&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.Oqu(t)}}function Da(n,s){if(1&n&&(e.ynx(0),e.TgZ(1,"span"),e.SDv(2,23),e.qZA(),e.TgZ(3,"ul"),e.YNc(4,Fa,2,1,"li",24),e.qZA(),e.BQk()),2&n){const t=e.oxw(2).snapshots;e.xp6(4),e.Q6J("ngForOf",t)}}function La(n,s){if(1&n&&(e.TgZ(0,"div",21)(1,"span"),e.SDv(2,22),e.qZA(),e._UZ(3,"br"),e.YNc(4,Da,5,1,"ng-container",13),e.qZA()),2&n){const t=e.oxw().snapshots;e.xp6(4),e.Q6J("ngIf",t.length>0)}}function va(n,s){1&n&&e.YNc(0,La,5,1,"div",20),2&n&&e.Q6J("ngIf",s.hasSnapshots)}const $a=function(n,s){return[n,s]};function Ba(n,s){if(1&n&&e._UZ(0,"i",26),2&n){const t=e.oxw(2);e.Q6J("ngClass",e.WLB(1,$a,t.icons.spinner,t.icons.spin))}}function Ga(n,s){if(1&n&&(e.TgZ(0,"span",26),e._uU(1),e.qZA()),2&n){const t=e.oxw(),o=t.column,i=t.row;e.Q6J("ngClass",null!=o&&null!=o.customTemplateConfig&&o.customTemplateConfig.executingClass?o.customTemplateConfig.executingClass:"text-muted italic"),e.xp6(1),e.hij(" (",i.cdExecuting,") ")}}function ya(n,s){if(1&n&&e._UZ(0,"i",28),2&n){const t=e.oxw(2);e.Gre("",t.icons.warning," warn")}}function xa(n,s){if(1&n&&(e.YNc(0,Ba,1,4,"i",25),e.TgZ(1,"span",26),e._uU(2),e.qZA(),e.YNc(3,Ga,2,2,"span",25),e.YNc(4,ya,1,3,"i",27)),2&n){const t=s.column,o=s.value,i=s.row;e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngClass",null==t||null==t.customTemplateConfig?null:t.customTemplateConfig.valueClass),e.xp6(1),e.hij(" ",o," 
"),e.xp6(1),e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngIf",i.source&&"REMOVING"===i.source)}}function Za(n,s){if(1&n&&(e.TgZ(0,"cd-alert-panel",29),e._uU(1),e.qZA(),e.TgZ(2,"div",30),e.tHW(3,31),e._UZ(4,"strong"),e.N_p(),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Oqu(t.errorMessage)}}function wa(n,s){if(1&n&&(e.TgZ(0,"span",34)(1,"span"),e._uU(2,"-"),e.qZA()()),2&n){e.oxw(2);const t=e.MAs(22);e.Q6J("ngbTooltip",t)}}function Ha(n,s){if(1&n&&e._UZ(0,"cd-usage-bar",36),2&n){const t=e.oxw(2).row;e.Q6J("total",t.size)("used",t.disk_usage)("title",t.name)}}function ka(n,s){if(1&n&&e.YNc(0,Ha,1,3,"cd-usage-bar",35),2&n){const t=e.oxw().row;e.Q6J("ngIf",t)}}function Ka(n,s){if(1&n&&(e.YNc(0,wa,3,1,"span",32),e.YNc(1,ka,1,1,"ng-template",null,33,e.W1O)),2&n){const t=s.row,o=e.MAs(2);e.Q6J("ngIf",t.features_name&&(!t.features_name.includes("fast-diff")||"snapshot"===t.mirror_mode))("ngIfElse",o)}}function qa(n,s){1&n&&e._UZ(0,"div",37),2&n&&e.Q6J("innerHtml","Only available for RBD images with fast-diff enabled and without snapshot mirroring",e.oJD)}let Qa=(()=>{class n extends St.o{createRbdFromTaskImageSpec(t){const o=v.N.fromString(t);return this.createRbdFromTask(o.poolName,o.namespace,o.imageName)}createRbdFromTask(t,o,i){const _=new ms;return _.id="-1",_.unique_id="-1",_.name=i,_.namespace=o,_.pool_name=t,_.image_format=Ie.V2,_}constructor(t,o,i,_,a,l,d,g,S){super(),this.authStorageService=t,this.rbdService=o,this.dimlessBinaryPipe=i,this.dimlessPipe=_,this.modalService=a,this.taskWrapper=l,this.taskListService=d,this.urlBuilder=g,this.actionLabels=S,this.tableStatus=new Lt.c("light"),this.selection=new Ee.r,this.icons=T.P,this.count=0,this.tableContext=null,this.builders={"rbd/create":O=>this.createRbdFromTask(O.pool_name,O.namespace,O.image_name),"rbd/delete":O=>this.createRbdFromTaskImageSpec(O.image_spec),"rbd/clone":O=>this.createRbdFromTask(O.child_pool_name,O.child_namespace,O.child_image_name),"rbd/copy":O=>this.createRbdFromTask(O.dest_pool_name,O.dest_namespace,O.dest_image_name)},this.permission=this.authStorageService.getPermissions().rbdImage;const I=()=>this.selection.first()&&new 
v.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name).toStringEncoded();this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>this.urlBuilder.getCreate(),canBePrimary:O=>!O.hasSingleSelection,name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>this.urlBuilder.getEdit(I()),name:this.actionLabels.EDIT,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)},{permission:"create",canBePrimary:O=>O.hasSingleSelection,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||!!O.first().cdExecuting,icon:T.P.copy,routerLink:()=>`/block/rbd/copy/${I()}`,name:this.actionLabels.COPY},{permission:"update",disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||O.first().cdExecuting||!O.first().parent,icon:T.P.flatten,click:()=>this.flattenRbdModal(),name:this.actionLabels.FLATTEN},{permission:"update",icon:T.P.refresh,click:()=>this.resyncRbdModal(),name:this.actionLabels.RESYNC,disable:O=>this.getResyncDisableDesc(O)},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteRbdModal(),name:this.actionLabels.DELETE,disable:O=>this.getDeleteDisableDesc(O)},{permission:"delete",icon:T.P.trash,click:()=>this.trashRbdModal(),name:this.actionLabels.TRASH,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||O.first().image_format===Ie.V1},{permission:"update",icon:T.P.edit,click:()=>this.removeSchedulingModal(),name:this.actionLabels.REMOVE_SCHEDULING,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||void 0===O.first().schedule_info},{permission:"update",icon:T.P.edit,click:()=>this.actionPrimary(!0),name:this.actionLabels.PROMOTE,visible:()=>null!=this.selection.first()&&!this.selection.first().primary,disable:()=>"Disabled"===this.selection.first().mirror_mode?"Mirroring needs to be enabled on the image to perform this action":""},{permission:"update",icon:T.P.edit,click:()=>this.actionPrimary(!1),name:this.actionLabels.DEMOTE,visible:()=>null!=this.selection.first()&&this.selection.first().primary,disable:()=>"Disabled"===this.selection.first().mirror_mode?"Mirroring needs to be enabled on the image to perform this action":""}]}ngOnInit(){this.columns=[{name:"Name",prop:"name",flexGrow:2,cellTemplate:this.removingStatTpl},{name:"Pool",prop:"pool_name",flexGrow:2},{name:"Namespace",prop:"namespace",flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessBinaryPipe},{name:"Usage",prop:"usage",cellTemplate:this.imageUsageTpl,flexGrow:1.5},{name:"Objects",prop:"num_objs",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessPipe},{name:"Object size",prop:"obj_size",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessBinaryPipe},{name:"Parent",prop:"parent",flexGrow:2,sortable:!1,cellTemplate:this.parentTpl},{name:"Mirroring",prop:"mirror_mode",flexGrow:3,sortable:!1,cellTemplate:this.mirroringTpl},{name:"Next Scheduled Snapshot",prop:"mirror_mode",flexGrow:3,sortable:!1,cellTemplate:this.ScheduleTpl}],this.taskListService.init(i=>this.getRbdImages(i),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/clone","rbd/copy","rbd/create","rbd/delete","rbd/edit","rbd/flatten","rbd/trash/move"].includes(i.name),(i,_)=>{let a;switch(_.name){case"rbd/copy":a=new v.N(_.metadata.dest_pool_name,_.metadata.dest_namespace,_.metadata.dest_image_name).toString();break;case"rbd/clone":a=new 
v.N(_.metadata.child_pool_name,_.metadata.child_namespace,_.metadata.child_image_name).toString();break;case"rbd/create":a=new v.N(_.metadata.pool_name,_.metadata.namespace,_.metadata.image_name).toString();break;default:a=_.metadata.image_spec}return a===new v.N(i.pool_name,i.namespace,i.name).toString()},this.builders)}onFetchError(){this.table.reset(),this.tableStatus=new Lt.c("danger")}getRbdImages(t){return null!==t&&(this.tableContext=t),null==this.tableContext&&(this.tableContext=new h.E(()=>{})),this.rbdService.list(this.tableContext?.toParams())}prepareResponse(t){let o=[];return t.forEach(i=>{o=o.concat(i.value)}),o.forEach(i=>{if(void 0!==i.schedule_info){let _=[];const a="scheduled";let l=+new Date(i.schedule_info.schedule_time);const d=(new Date).getTimezoneOffset();l+=6e4*Math.abs(d),_.push(i.mirror_mode,a,l),i.mirror_mode=_,_=[]}}),this.count=o.length>0?u_.v.getCount(t[0]):0,o}updateSelection(t){this.selection=t}deleteRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=new v.N(t,o,i);this.modalRef=this.modalService.show(ue.M,{itemDescription:"RBD",itemNames:[_],bodyTemplate:this.deleteTpl,bodyContext:{hasSnapshots:this.hasSnapshots(),snapshots:this.listProtectedSnapshots()},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/delete",{image_spec:_.toString()}),call:this.rbdService.delete(_)})})}resyncRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=new v.N(t,o,i);this.modalRef=this.modalService.show(ue.M,{itemDescription:"RBD",itemNames:[_],actionDescription:"resync",submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/edit",{image_spec:_.toString()}),call:this.rbdService.update(_,{resync:!0})})})}trashRbdModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,hasSnapshots:this.hasSnapshots()};this.modalRef=this.modalService.show(R_,t)}flattenRbd(t){this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/flatten",{image_spec:t.toString()}),call:this.rbdService.flatten(t)}).subscribe({complete:()=>{this.modalRef.close()}})}flattenRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=this.selection.first().parent,a=new v.N(_.pool_name,_.pool_namespace,_.image_name),l=new v.N(t,o,i),d={titleText:"RBD flatten",buttonText:"Flatten",bodyTpl:this.flattenTpl,bodyData:{parent:`${a}@${_.snap_name}`,child:l.toString()},onSubmit:()=>{this.flattenRbd(l)}};this.modalRef=this.modalService.show(ct.Y,d)}editRequest(){const t=new lt;return t.remove_scheduling=!t.remove_scheduling,t}removeSchedulingModal(){const t=this.selection.first().name,o=new v.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name);this.modalRef=this.modalService.show(ue.M,{actionDescription:"remove scheduling on",itemDescription:"image",itemNames:[`${t}`],submitActionObservable:()=>new At.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/edit",{image_spec:o.toString()}),call:this.rbdService.update(o,this.editRequest())}).subscribe({error:_=>i.error(_),complete:()=>{this.modalRef.close()}})})})}actionPrimary(t){const o=new lt;o.primary=t,o.features=null;const i=new v.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name);this.taskWrapper.wrapTaskAroundCall({task:new 
E.R("rbd/edit",{image_spec:i.toString()}),call:this.rbdService.update(i,o)}).subscribe(()=>{},_=>{_.preventDefault(),t&&(this.errorMessage=_.error.detail.replace(/\[.*?\]\s*/,""),o.force=!0,this.modalRef=this.modalService.show(ct.Y,{titleText:"Warning",buttonText:"Enforce",warning:!0,bodyTpl:this.forcePromoteConfirmation,onSubmit:()=>{this.rbdService.update(i,o).subscribe(()=>{this.modalRef.close()},()=>{this.modalRef.close()})}}))})}hasSnapshots(){return(this.selection.first().snapshots||[]).length>0}hasClonedSnapshots(t){return(t.snapshots||[]).some(i=>i.children&&i.children.length>0)}listProtectedSnapshots(){return this.selection.first().snapshots.reduce((i,_)=>(_.is_protected&&i.push(_.name),i),[])}getDeleteDisableDesc(t){const o=t.first();return o&&this.hasClonedSnapshots(o)?"This RBD has cloned snapshots. Please delete related RBDs before deleting this RBD.":this.getInvalidNameDisable(t)||this.hasClonedSnapshots(t.first())}getResyncDisableDesc(t){const o=t.first();return o&&this.imageIsPrimary(o)?"Primary RBD images cannot be resynced":this.getInvalidNameDisable(t)}imageIsPrimary(t){return t.primary}getInvalidNameDisable(t){return t.first()?.name?.match(/[@/]/)?"This RBD image has an invalid name and can't be managed by ceph.":!t.first()||!t.hasSingleSelection}getRemovingStatusDesc(t){return"REMOVING"===t.first()?.source&&"Action not possible for an RBD in status 'Removing'"}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(Ue.$),e.Y36(st.n),e.Y36(pe.Z),e.Y36(u.P),e.Y36(de.j),e.Y36(dt.F),e.Y36(L.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(W.a,7),e.Gf(sa,5),e.Gf(_a,7),e.Gf(aa,5),e.Gf(ra,7),e.Gf(la,7),e.Gf(ca,7),e.Gf(da,7),e.Gf(pa,7),e.Gf(ua,7),e.Gf(ma,7),e.Gf(ga,7),e.Gf(Ta,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.usageTpl=i.first),e.iGM(i=e.CRH())&&(o.parentTpl=i.first),e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.ScheduleTpl=i.first),e.iGM(i=e.CRH())&&(o.mirroringTpl=i.first),e.iGM(i=e.CRH())&&(o.flattenTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first),e.iGM(i=e.CRH())&&(o.removingStatTpl=i.first),e.iGM(i=e.CRH())&&(o.forcePromoteConfirmation=i.first),e.iGM(i=e.CRH())&&(o.usedTmpl=i.first),e.iGM(i=e.CRH())&&(o.totalUsedTmpl=i.first),e.iGM(i=e.CRH())&&(o.imageUsageTpl=i.first)}},features:[e._Bn([de.j,{provide:dt.F,useValue:new dt.F("block/rbd")}]),e.qOj],decls:23,vars:13,consts:function(){let s,t,o,i,_,a;return s="primary",t="secondary",o="Deleting this image will also delete all its snapshots.",i="The following snapshots are currently protected and will be removed:",_="RBD in status 'Removing'",a="" + "\ufffd#4\ufffd" + " Do you want to force the operation? 
" + "\ufffd/#4\ufffd" + "",[["columnMode","flex","identifier","unique_id","forceIdentifier","true","selectionType","single",3,"data","columns","searchableObjects","serverSide","count","hasDetails","status","maxLimit","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["parentTpl",""],["mirroringTpl",""],["ScheduleTpl",""],["flattenTpl",""],["deleteTpl",""],["removingStatTpl",""],["forcePromoteConfirmation",""],["imageUsageTpl",""],["usageTooltip",""],[4,"ngIf"],["class","badge badge-info",4,"ngIf","ngIfElse"],["class","badge badge-info",4,"ngIf"],["probb",""],[1,"badge","badge-info"],s,t,["class","alert alert-warning","role","alert",4,"ngIf"],["role","alert",1,"alert","alert-warning"],o,i,[4,"ngFor","ngForOf"],[3,"ngClass",4,"ngIf"],[3,"ngClass"],["title",_,3,"class",4,"ngIf"],["title",_],["type","warning"],[1,"m-4"],a,[3,"ngbTooltip",4,"ngIf","ngIfElse"],["usageBar",""],[3,"ngbTooltip"],["decimals","2",3,"total","used","title",4,"ngIf"],["decimals","2",3,"total","used","title"],[3,"innerHtml"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0,1),e.NdJ("fetchData",function(_){return o.taskListService.fetch(_)})("setExpandedRow",function(_){return o.setExpandedRow(_)})("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(3,"cd-table-actions",2)(4,"cd-rbd-details",3),e.qZA(),e.YNc(5,Ra,2,2,"ng-template",null,4,e.W1O),e.YNc(7,Pa,8,5,"ng-template",null,5,e.W1O),e.YNc(9,Ia,1,1,"ng-template",null,6,e.W1O),e.YNc(11,Na,13,3,"ng-template",null,7,e.W1O),e.YNc(13,va,1,1,"ng-template",null,8,e.W1O),e.YNc(15,xa,5,5,"ng-template",null,9,e.W1O),e.YNc(17,Za,5,1,"ng-template",null,10,e.W1O),e.YNc(19,Ka,3,2,"ng-template",null,11,e.W1O),e.YNc(21,qa,1,1,"ng-template",null,12,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("searchableObjects",!0)("serverSide",!0)("count",o.count)("hasDetails",!0)("status",o.tableStatus)("maxLimit",25)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("selection",o.expandedRow))},dependencies:[c.mk,c.sg,c.O5,F._L,E_.O,it.G,W.a,Me.K,na,tt,Be.N],styles:[".warn[_ngcontent-%COMP%]{color:#d48200}"]}),n})();function za(n,s){1&n&&e._UZ(0,"input",19)}function Ja(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,24),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ya(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,25),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Va(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,26),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ua(n,s){if(1&n&&(e.TgZ(0,"option",27),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function ja(n,s){if(1&n&&(e.TgZ(0,"select",20),e.YNc(1,Ja,2,1,"option",21),e.YNc(2,Ya,2,1,"option",21),e.YNc(3,Va,2,1,"option",21),e.YNc(4,Ua,2,2,"option",22),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function Wa(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,29),e.qZA())}function er(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,30),e.qZA())}function tr(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,31),e.qZA())}let or=(()=>{class 
n{constructor(t,o,i,_,a,l){this.activeModal=t,this.actionLabels=o,this.authStorageService=i,this.notificationService=_,this.poolService=a,this.rbdService=l,this.pools=null,this.editing=!1,this.poolPermission=this.authStorageService.getPermissions().pool,this.createForm()}createForm(){this.namespaceForm=new Z.d({pool:new r.p4(""),namespace:new r.p4("")},this.validator(),this.asyncValidator())}validator(){return t=>{const o=t.get("pool"),i=t.get("namespace");let _=null;o.value||(_={required:!0}),o.setErrors(_);let a=null;return i.value||(a={required:!0}),i.setErrors(a),null}}asyncValidator(){return t=>new Promise(o=>{const i=t.get("pool"),_=t.get("namespace");this.rbdService.listNamespaces(i.value).subscribe(a=>{if(a.some(l=>l.namespace===_.value)){const l={namespaceExists:!0};_.setErrors(l),o(l)}else o(null)})})}ngOnInit(){this.onSubmit=new Gt.xQ,this.poolPermission.read&&this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{const o=[];for(const i of t)this.rbdService.isRBDPool(i)&&"replicated"===i.type&&o.push(i);if(this.pools=o,1===this.pools.length){const i=this.pools[0].pool_name;this.namespaceForm.get("pool").setValue(i)}})}submit(){const t=this.namespaceForm.getValue("pool"),o=this.namespaceForm.getValue("namespace"),i=new E.R;i.name="rbd/namespace/create",i.metadata={pool:t,namespace:o},this.rbdService.createNamespace(t,o).toPromise().then(()=>{this.notificationService.show(nt.k.success,"Created namespace '" + t + "/" + o + "'"),this.activeModal.close(),this.onSubmit.next()}).catch(()=>{this.namespaceForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(F.Kz),e.Y36(L.p4),e.Y36(oe.j),e.Y36(ve.g),e.Y36(Ve.q),e.Y36(H))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-form-modal"]],decls:23,vars:9,consts:function(){let s,t,o,i,_,a,l,d,g;return s="Create Namespace",t="Pool",o="Name",i="Loading...",_="-- No rbd pools available --",a="-- Select a pool --",l="This field is required.",d="This field is required.",g="Namespace already exists.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","namespaceForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","pool",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-select","formControlName","pool",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","namespace",1,"cd-col-form-label","required"],o,["type","text","placeholder","Namespace name...","id","namespace","name","namespace","formControlName","namespace","autofocus","",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-select"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],i,_,a,[3,"value"],[1,"invalid-feedback"],l,d,g]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"div",7)(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e.YNc(11,za,1,0,"input",11),e.YNc(12,ja,5,4,"select",12),e.YNc(13,Wa,2,0,"span",13),e.qZA()(),e.TgZ(14,"div",7)(15,"label",14),e.SDv(16,15),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",16),e.YNc(19,er,2,0,"span",13),e.YNc(20,tr,2,0,"span",13),e.qZA()()(),e.TgZ(21,"div",17)(22,"cd-form-button-panel",18),e.NdJ("submitActionEvent",function(){return o.submit()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.namespaceForm),e.xp6(7),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("pool",i,"required")),e.xp6(6),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"required")),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"namespaceExists")),e.xp6(2),e.Q6J("form",o.namespaceForm)("submitText",o.actionLabels.CREATE)}},dependencies:[c.sg,c.O5,r._Y,r.YN,r.Kr,r.Fj,r.EJ,r.JJ,r.JL,r.sg,r.u,f.z,A.p,Oe.U,M.o,B.b,K.P,J.V]}),n})(),nr=(()=>{class n{constructor(t,o,i,_,a,l){this.authStorageService=t,this.rbdService=o,this.poolService=i,this.modalService=_,this.notificationService=a,this.actionLabels=l,this.selection=new Ee.r,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"create",icon:T.P.add,click:()=>this.createModal(),name:this.actionLabels.CREATE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Pool",prop:"pool",flexGrow:1},{name:"Total images",prop:"num_images",flexGrow:1}],this.refresh()}refresh(){this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{t=t.filter(i=>this.rbdService.isRBDPool(i)&&"replicated"===i.type);const o=[];t.forEach(i=>{o.push(this.rbdService.listNamespaces(i.pool_name))}),o.length>0?(0,ce.D)(o).subscribe(i=>{const _=[];for(let a=0;a{_.push({id:`${d}/${g.namespace}`,pool:d,namespace:g.namespace,num_images:g.num_images})})}this.namespaces=_}):this.namespaces=[]})}updateSelection(t){this.selection=t}createModal(){this.modalRef=this.modalService.show(or),this.modalRef.componentInstance.onSubmit.subscribe(()=>{this.refresh()})}deleteModal(){const t=this.selection.first().pool,o=this.selection.first().namespace;this.modalRef=this.modalService.show(ue.M,{itemDescription:"Namespace",itemNames:[`${t}/${o}`],submitAction:()=>this.rbdService.deleteNamespace(t,o).subscribe(()=>{this.notificationService.show(nt.k.success,"Deleted namespace '" + t + "/" + o + "'"),this.modalRef.close(),this.refresh()},()=>{this.modalRef.componentInstance.stopLoadingSpinner()})})}getDeleteDisableDesc(){return this.selection.first()?.num_images>0?"Namespace contains images":!this.selection?.first()}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(Ve.q),e.Y36(pe.Z),e.Y36(ve.g),e.Y36(L.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-list"]],features:[e._Bn([de.j])],decls:4,vars:5,consts:[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"]],template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(_){return o.updateSelection(_)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.qZA()()),2&t&&(e.xp6(1),e.Q6J("data",o.namespaces)("columns",o.columns),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},dependencies:[W.a,Me.K,tt]}),n})(),ir=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-performance"]],decls:2,vars:2,consts:function(){let s;return s="RBD overview",[["title",s,"uid","41FrpeUiz","grafanaStyle","two",3,"grafanaPath","type"]]},template:function(t,o){1&t&&e._UZ(0,"cd-rbd-tabs")(1,"cd-grafana",0),2&t&&(e.xp6(1),e.Q6J("grafanaPath","rbd-overview?")("type","metrics"))},dependencies:[vt.F,tt]}),n})();var sr=p(91801);function _r(n,s){1&n&&e._UZ(0,"input",15)}function ar(n,s){if(1&n&&(e.TgZ(0,"option",20),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function rr(n,s){if(1&n&&(e.TgZ(0,"select",16)(1,"option",17),e.SDv(2,18),e.qZA(),e.YNc(3,ar,2,2,"option",19),e.qZA()),2&n){const t=e.oxw();e.xp6(3),e.Q6J("ngForOf",t.pools)}}let lr=(()=>{class n{constructor(t,o,i,_,a,l,d){this.authStorageService=t,this.rbdService=o,this.activeModal=i,this.actionLabels=_,this.fb=a,this.poolService=l,this.taskWrapper=d,this.poolPermission=this.authStorageService.getPermissions().pool}createForm(){this.purgeForm=this.fb.group({poolName:""})}ngOnInit(){this.poolPermission.read&&this.poolService.list(["pool_name","application_metadata"]).then(t=>{this.pools=t.filter(o=>o.application_metadata.includes("rbd")).map(o=>o.pool_name)}),this.createForm()}purge(){const t=this.purgeForm.getValue("poolName")||"";this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/trash/purge",{pool_name:t}),call:this.rbdService.purgeTrash(t)}).subscribe({error:()=>{this.purgeForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(F.Kz),e.Y36(L.p4),e.Y36(pt.O),e.Y36(Ve.q),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-purge-modal"]],decls:18,vars:6,consts:function(){let s,t,o,i,_;return s="Purge Trash",t="To purge, select\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "All" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + "\xA0 or one pool and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Purge" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".\xA0",t=e.Zx4(t),o="Pool:",i="Pool 
name...",_="All",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","purgeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],[1,"col-form-label","mx-auto"],o,["class","form-control","type","text","placeholder",i,"formControlName","poolName",4,"ngIf"],["id","poolName","name","poolName","class","form-control","formControlName","poolName",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder",i,"formControlName","poolName",1,"form-control"],["id","poolName","name","poolName","formControlName","poolName",1,"form-control"],["value",""],_,[3,"value",4,"ngFor","ngForOf"],[3,"value"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.tHW(8,7),e._UZ(9,"kbd")(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e.YNc(14,_r,1,0,"input",11),e.YNc(15,rr,4,1,"select",12),e.qZA()(),e.TgZ(16,"div",13)(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.purge()}),e.qZA()()(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.purgeForm),e.xp6(10),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(2),e.Q6J("form",o.purgeForm)("submitText",o.actionLabels.PURGE))},dependencies:[c.sg,c.O5,r._Y,r.YN,r.Kr,r.Fj,r.EJ,r.JJ,r.JL,r.sg,r.u,f.z,A.p,M.o,B.b,K.P,J.V]}),n})();function cr(n,s){1&n&&(e.TgZ(0,"span",15),e.SDv(1,16),e.qZA())}let dr=(()=>{class n{constructor(t,o,i,_,a){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=_,this.taskWrapper=a}ngOnInit(){this.imageSpec=new v.N(this.poolName,this.namespace,this.imageName).toString(),this.restoreForm=this.fb.group({name:this.imageName})}restore(){const t=this.restoreForm.getValue("name"),o=new v.N(this.poolName,this.namespace,this.imageId);this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/trash/restore",{image_id_spec:o.toString(),new_image_name:t}),call:this.rbdService.restoreTrash(o,t)}).subscribe({error:()=>{this.restoreForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(H),e.Y36(F.Kz),e.Y36(L.p4),e.Y36(pt.O),e.Y36(u.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-restore-modal"]],decls:18,vars:7,consts:function(){let s,t,o,i;return s="Restore Image",t="To restore\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "" + "\ufffd0\ufffd" + "@" + "\ufffd1\ufffd" + "" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ",\xA0 type the image's new name and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Restore" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".",t=e.Zx4(t),o="New Name",i="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","restoreForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","name",1,"col-form-label"],o,["type","text","name","name","id","name","autocomplete","off","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.tHW(8,7),e._UZ(9,"kbd")(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,cr,2,0,"span",12),e.qZA()(),e.TgZ(16,"div",13)(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.restore()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.restoreForm),e.xp6(6),e.pQV(o.imageSpec)(o.imageId),e.QtT(8),e.xp6(5),e.Q6J("ngIf",o.restoreForm.showError("name",i,"required")),e.xp6(2),e.Q6J("form",o.restoreForm)("submitText",o.actionLabels.RESTORE)}},dependencies:[c.O5,r._Y,r.Fj,r.JJ,r.JL,r.sg,r.u,f.z,A.p,Oe.U,M.o,B.b,K.P,J.V]}),n})();const pr=["expiresTpl"],ur=["deleteTpl"],mr=function(n){return[n]};function gr(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"button",6),e.NdJ("click",function(){e.CHM(t);const i=e.oxw();return e.KtG(i.purgeModal())}),e._UZ(1,"i",7),e.ynx(2),e.SDv(3,8),e.BQk(),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("disabled",t.disablePurgeBtn),e.xp6(1),e.Q6J("ngClass",e.VKq(2,mr,t.icons.destroy))}}function Tr(n,s){1&n&&(e.ynx(0),e.SDv(1,10),e.BQk())}function fr(n,s){1&n&&(e.ynx(0),e.SDv(1,11),e.BQk())}function Cr(n,s){if(1&n&&(e.YNc(0,Tr,2,0,"ng-container",9),e.YNc(1,fr,2,0,"ng-container",9),e._uU(2),e.ALo(3,"cdDate")),2&n){const t=s.row,o=s.value;e.Q6J("ngIf",t.cdIsExpired),e.xp6(1),e.Q6J("ngIf",!t.cdIsExpired),e.xp6(1),e.hij(" ",e.lcZ(3,3,o),"\n")}}function Sr(n,s){if(1&n&&(e.TgZ(0,"p",13)(1,"strong"),e.ynx(2),e.SDv(3,14),e.ALo(4,"cdDate"),e.BQk(),e.qZA()()),2&n){const t=e.oxw().expiresAt;e.xp6(4),e.pQV(e.lcZ(4,1,t)),e.QtT(3)}}function Rr(n,s){1&n&&e.YNc(0,Sr,5,3,"p",12),2&n&&e.Q6J("ngIf",!s.isExpired)}let Er=(()=>{class n{constructor(t,o,i,_,a,l,d){this.authStorageService=t,this.rbdService=o,this.modalService=i,this.cdDatePipe=_,this.taskListService=a,this.taskWrapper=l,this.actionLabels=d,this.icons=T.P,this.executingTasks=[],this.selection=new Ee.r,this.tableStatus=new se.E,this.disablePurgeBtn=!0,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"update",icon:T.P.undo,click:()=>this.restoreModal(),name:this.actionLabels.RESTORE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE}]}ngOnInit(){this.columns=[{name:"ID",prop:"id",flexGrow:1,cellTransformation:Le.e.executing},{name:"Name",prop:"name",flexGrow:1},{name:"Pool",prop:"pool_name",flexGrow:1},{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Status",prop:"deferment_end_time",flexGrow:1,cellTemplate:this.expiresTpl},{name:"Deleted At",prop:"deletion_time",flexGrow:1,pipe:this.cdDatePipe}],this.taskListService.init(()=>this.rbdService.listTrash(),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/trash/remove","rbd/trash/restore"].includes(i.name),(i,_)=>new v.N(i.pool_name,i.namespace,i.id).toString()===_.metadata.image_id_spec,void 0)}prepareResponse(t){let o=[];const i={};let 
_;if(t.forEach(a=>{C().isUndefined(i[a.status])&&(i[a.status]=[]),i[a.status].push(a.pool_name),o=o.concat(a.value),this.disablePurgeBtn=!o.length}),i[3]?_=3:i[1]?_=1:i[2]&&(_=2),_){const a=(i[_].length>1?"pools ":"pool ")+i[_].join();this.tableStatus=new se.E(_,a)}else this.tableStatus=new se.E;return o.forEach(a=>{a.cdIsExpired=he()().isAfter(a.deferment_end_time)}),o}onFetchError(){this.table.reset(),this.tableStatus=new se.E(sr.T.ValueException)}updateSelection(t){this.selection=t}restoreModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,imageId:this.selection.first().id};this.modalRef=this.modalService.show(dr,t)}deleteModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().id,_=this.selection.first().deferment_end_time,a=he()().isAfter(_),l=new v.N(t,o,i);this.modalRef=this.modalService.show(ue.M,{itemDescription:"RBD",itemNames:[l],bodyTemplate:this.deleteTpl,bodyContext:{expiresAt:_,isExpired:a},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new E.R("rbd/trash/remove",{image_id_spec:l.toString()}),call:this.rbdService.removeTrash(l,!0)})})}purgeModal(){this.modalService.show(lr)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(pe.Z),e.Y36(Be.N),e.Y36(de.j),e.Y36(u.P),e.Y36(L.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(W.a,7),e.Gf(pr,7),e.Gf(ur,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.expiresTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first)}},features:[e._Bn([de.j])],decls:9,vars:8,consts:function(){let s,t,o,i;return s="Purge Trash",t="Expired at",o="Protected until",i="This image is protected until " + "\ufffd0\ufffd" + ".",[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","status","autoReload","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["class","btn btn-light","type","button",3,"disabled","click",4,"ngIf"],["expiresTpl",""],["deleteTpl",""],["type","button",1,"btn","btn-light",3,"disabled","click"],["aria-hidden","true",3,"ngClass"],s,[4,"ngIf"],t,o,["class","text-danger",4,"ngIf"],[1,"text-danger"],i]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.taskListService.fetch()})("updateSelection",function(_){return o.updateSelection(_)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.YNc(4,gr,4,4,"button",3),e.qZA()(),e.YNc(5,Cr,4,5,"ng-template",null,4,e.W1O),e.YNc(7,Rr,1,1,"ng-template",null,5,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("status",o.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("ngIf",o.permission.delete))},dependencies:[c.mk,c.O5,W.a,Me.K,M.o,tt,Be.N]}),n})(),yt=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[c.ez,si,r.u5,r.UX,F.Oz,F.dT,F.HK,Ne.b,Pe.m,m.Bz,ne.xc]}),n})();const Mr=[{path:"",redirectTo:"rbd",pathMatch:"full"},{path:"rbd",canActivate:[j.T,re.P],data:{moduleStatusGuardConfig:{uiApiPath:"block/rbd",redirectTo:"error",header:"No RBD pools available",button_name:"Create RBD 
pool",button_route:"/pool/create"},breadcrumbs:"Images"},children:[{path:"",component:Qa},{path:"namespaces",component:nr,data:{breadcrumbs:"Namespaces"}},{path:"trash",component:Er,data:{breadcrumbs:"Trash"}},{path:"performance",component:ir,data:{breadcrumbs:"Overall Performance"}},{path:L.MQ.CREATE,component:$e,data:{breadcrumbs:L.Qn.CREATE}},{path:`${L.MQ.EDIT}/:image_spec`,component:$e,data:{breadcrumbs:L.Qn.EDIT}},{path:`${L.MQ.CLONE}/:image_spec/:snap`,component:$e,data:{breadcrumbs:L.Qn.CLONE}},{path:`${L.MQ.COPY}/:image_spec`,component:$e,data:{breadcrumbs:L.Qn.COPY}},{path:`${L.MQ.COPY}/:image_spec/:snap`,component:$e,data:{breadcrumbs:L.Qn.COPY}}]},{path:"mirroring",component:_s,canActivate:[j.T,re.P],data:{moduleStatusGuardConfig:{uiApiPath:"block/mirroring",redirectTo:"error",header:"RBD mirroring is not configured",button_name:"Configure RBD Mirroring",button_title:"This will create rbd-mirror service and a replicated RBD pool",component:"RBD Mirroring",uiConfig:!0},breadcrumbs:"Mirroring"},children:[{path:`${L.MQ.EDIT}/:pool_name`,component:cs,outlet:"modal"}]},{path:"iscsi",canActivate:[j.T],data:{breadcrumbs:"iSCSI"},children:[{path:"",redirectTo:"overview",pathMatch:"full"},{path:"overview",component:ii,data:{breadcrumbs:"Overview"}},{path:"targets",data:{breadcrumbs:"Targets"},children:[{path:"",component:Kn},{path:L.MQ.CREATE,component:Ct,data:{breadcrumbs:L.Qn.CREATE}},{path:`${L.MQ.EDIT}/:target_iqn`,component:Ct,data:{breadcrumbs:L.Qn.EDIT}}]}]}];let Or=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[yt,m.Bz.forChild(Mr)]}),n})()},60950:(mt,Ae,p)=>{p.d(Ae,{d:()=>z});var c=p(64537),r=p(20092),m=p(23815),ne=p.n(m),F=p(7357),Ne=p(65862),L=p(95463),j=p(30633),re=p(28211),Pe=p(34089),le=p(88692),C=p(18372),ce=p(20044);let D=(()=>{class h{constructor(u,f,A,M){this.elementRef=u,this.control=f,this.dimlessBinaryPerSecondPipe=A,this.formatter=M,this.ngModelChange=new c.vpe,this.el=this.elementRef.nativeElement}ngOnInit(){this.setValue(this.el.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.el.value))}setValue(u){/^[\d.]+$/.test(u)&&(u+=this.defaultUnit||"m");const f=this.formatter.toBytes(u,0),A=this.round(f);this.el.value=this.dimlessBinaryPerSecondPipe.transform(A),null!==f?(this.ngModelChange.emit(this.el.value),this.control.control.setValue(this.el.value)):(this.ngModelChange.emit(null),this.control.control.setValue(null))}round(u){if(null!==u&&0!==u){if(!ne().isUndefined(this.minBytes)&&uthis.maxBytes)return this.maxBytes;if(!ne().isUndefined(this.roundPower)){const f=Math.round(Math.log(u)/Math.log(this.roundPower));return Math.pow(this.roundPower,f)}}return u}onBlur(u){this.setValue(u)}}return h.\u0275fac=function(u){return new(u||h)(c.Y36(c.SBq),c.Y36(r.a5),c.Y36(ce.O),c.Y36(re.H))},h.\u0275dir=c.lG2({type:h,selectors:[["","cdDimlessBinaryPerSecond",""]],hostBindings:function(u,f){1&u&&c.NdJ("blur",function(M){return f.onBlur(M.target.value)})},inputs:{ngDataReady:"ngDataReady",minBytes:"minBytes",maxBytes:"maxBytes",roundPower:"roundPower",defaultUnit:"defaultUnit"},outputs:{ngModelChange:"ngModelChange"}}),h})(),ie=(()=>{class h{constructor(u,f){this.control=u,this.formatter=f}setValue(u){const f=this.formatter.toMilliseconds(u);this.control.control.setValue(`${f} ms`)}ngOnInit(){this.setValue(this.control.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.control.value))}onUpdate(u){this.setValue(u)}}return h.\u0275fac=function(u){return 
new(u||h)(c.Y36(r.a5),c.Y36(re.H))},h.\u0275dir=c.lG2({type:h,selectors:[["","cdMilliseconds",""]],hostBindings:function(u,f){1&u&&c.NdJ("blur",function(M){return f.onUpdate(M.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),h})(),V=(()=>{class h{constructor(u,f){this.formatter=u,this.ngControl=f}setValue(u){const f=this.formatter.toIops(u);this.ngControl.control.setValue(`${f} IOPS`)}ngOnInit(){this.setValue(this.ngControl.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.ngControl.value))}onUpdate(u){this.setValue(u)}}return h.\u0275fac=function(u){return new(u||h)(c.Y36(re.H),c.Y36(r.a5))},h.\u0275dir=c.lG2({type:h,selectors:[["","cdIops",""]],hostBindings:function(u,f){1&u&&c.NdJ("blur",function(M){return f.onUpdate(M.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),h})();var e=p(87925),X=p(94276),Fe=p(56310),De=p(41582);function v(h,E){if(1&h&&(c.ynx(0),c._UZ(1,"input",17),c.BQk()),2&h){const u=c.oxw().$implicit,f=c.oxw(2);c.xp6(1),c.Q6J("id",u.name)("name",u.name)("formControlName",u.name)("ngDataReady",f.ngDataReady)}}function be(h,E){if(1&h&&(c.ynx(0),c._UZ(1,"input",18),c.BQk()),2&h){const u=c.oxw().$implicit,f=c.oxw(2);c.xp6(1),c.Q6J("id",u.name)("name",u.name)("formControlName",u.name)("ngDataReady",f.ngDataReady)}}function H(h,E){if(1&h&&(c.ynx(0),c._UZ(1,"input",19),c.BQk()),2&h){const u=c.oxw().$implicit,f=c.oxw(2);c.xp6(1),c.Q6J("id",u.name)("name",u.name)("formControlName",u.name)("ngDataReady",f.ngDataReady)}}function N(h,E){1&h&&(c.TgZ(0,"span",20),c.SDv(1,21),c.qZA())}const x=function(h){return{active:h}},T=function(h){return[h]};function k(h,E){if(1&h){const u=c.EpF();c.TgZ(0,"div",10)(1,"label",11),c._uU(2),c.TgZ(3,"cd-helper"),c._uU(4),c.qZA()(),c.TgZ(5,"div")(6,"div",12),c.ynx(7,13),c.YNc(8,v,2,4,"ng-container",14),c.YNc(9,be,2,4,"ng-container",14),c.YNc(10,H,2,4,"ng-container",14),c.BQk(),c.TgZ(11,"button",15),c.NdJ("click",function(){const M=c.CHM(u).$implicit,B=c.oxw(2);return c.KtG(B.reset(M.name))}),c._UZ(12,"i",7),c.qZA()(),c.YNc(13,N,2,0,"span",16),c.qZA()()}if(2&h){const u=E.$implicit,f=c.oxw().$implicit,A=c.oxw(),M=c.MAs(1);c.xp6(1),c.Q6J("for",u.name),c.xp6(1),c.Oqu(u.displayName),c.xp6(2),c.Oqu(u.description),c.xp6(1),c.Gre("cd-col-form-input ",f.heading,""),c.xp6(2),c.Q6J("ngSwitch",u.type),c.xp6(1),c.Q6J("ngSwitchCase",A.configurationType.milliseconds),c.xp6(1),c.Q6J("ngSwitchCase",A.configurationType.bps),c.xp6(1),c.Q6J("ngSwitchCase",A.configurationType.iops),c.xp6(1),c.Q6J("ngClass",c.VKq(13,x,A.isDisabled(u.name))),c.xp6(1),c.Q6J("ngClass",c.VKq(15,T,A.icons.erase)),c.xp6(1),c.Q6J("ngIf",A.form.showError("configuration."+u.name,M,"min"))}}function Z(h,E){if(1&h){const u=c.EpF();c.TgZ(0,"div",4)(1,"h4",5)(2,"span",6),c.NdJ("click",function(){const M=c.CHM(u).$implicit,B=c.oxw();return c.KtG(B.toggleSectionVisibility(M.class))}),c._uU(3),c._UZ(4,"i",7),c.qZA()(),c.TgZ(5,"div",8),c.YNc(6,k,14,17,"div",9),c.qZA()()}if(2&h){const u=E.$implicit,f=c.oxw();c.xp6(3),c.hij(" ",u.heading," "),c.xp6(1),c.Q6J("ngClass",f.sectionVisibility[u.class]?f.icons.minusCircle:f.icons.addCircle),c.xp6(1),c.Tol(u.class),c.Q6J("hidden",!f.sectionVisibility[u.class]),c.xp6(1),c.Q6J("ngForOf",u.options)}}let z=(()=>{class h{constructor(u,f){this.formatterService=u,this.rbdConfigurationService=f,this.initializeData=new F.t(1),this.changes=new c.vpe,this.icons=Ne.P,this.ngDataReady=new c.vpe,this.configurationType=j.r,this.sectionVisibility={}}ngOnInit(){const 
u=this.createConfigurationFormGroup();this.form.addControl("configuration",u),u.valueChanges.subscribe(()=>{this.changes.emit(this.getDirtyValues.bind(this))}),this.initializeData&&this.initializeData.subscribe(f=>{this.initialData=f.initialData;const A=f.sourceType;this.rbdConfigurationService.getWritableOptionFields().forEach(M=>{const B=f.initialData.filter(K=>K.name===M.name).pop();B&&B.source===A&&this.form.get(`configuration.${M.name}`).setValue(B.value)}),this.ngDataReady.emit()}),this.rbdConfigurationService.getWritableSections().forEach(f=>this.sectionVisibility[f.class]=!1)}getDirtyValues(u=!1,f){if(u&&!f)throw new Error("ProgrammingError: If local values shall be included, a proper localFieldType argument has to be provided, too");const A={};return this.rbdConfigurationService.getWritableOptionFields().forEach(M=>{const B=this.form.get("configuration").get(M.name);this.initialData&&this.initialData[M.name]===B.value||(B.dirty||u&&B.source===f)&&(A[M.name]=null===B.value?B.value:M.type===j.r.bps?this.formatterService.toBytes(B.value):M.type===j.r.milliseconds?this.formatterService.toMilliseconds(B.value):M.type===j.r.iops?this.formatterService.toIops(B.value):B.value)}),A}createConfigurationFormGroup(){const u=new L.d({});return this.rbdConfigurationService.getWritableOptionFields().forEach(f=>{let A;if(f.type!==j.r.milliseconds&&f.type!==j.r.iops&&f.type!==j.r.bps)throw new Error(`Type ${f.type} is unknown, you may need to add it to RbdConfiguration class`);{let M=0;ne().forEach(this.initialData,B=>{B.name===f.name&&(M=B.value)}),A=new r.p4(M,r.kI.min(0))}u.addControl(f.name,A)}),u}reset(u){const f=this.form.get("configuration").get(u);f.disabled?(f.setValue(f.previousValue||0),f.enable(),f.previousValue||f.markAsPristine()):(f.previousValue=f.value,f.setValue(null),f.markAsDirty(),f.disable())}isDisabled(u){return this.form.get("configuration").get(u).disabled}toggleSectionVisibility(u){this.sectionVisibility[u]=!this.sectionVisibility[u]}}return h.\u0275fac=function(u){return new(u||h)(c.Y36(re.H),c.Y36(Pe.n))},h.\u0275cmp=c.Xpm({type:h,selectors:[["cd-rbd-configuration-form"]],inputs:{form:"form",initializeData:"initializeData"},outputs:{changes:"changes"},decls:5,vars:2,consts:function(){let E,u,f;return E="RBD Configuration",u="Remove the local configuration value. 
The parent configuration value will be inherited and used instead.",f="The minimum value is 0",[[3,"formGroup"],["cfgFormGroup",""],E,["class","col-12",4,"ngFor","ngForOf"],[1,"col-12"],[1,"cd-header"],[1,"collapsible",3,"click"],["aria-hidden","true",3,"ngClass"],[3,"hidden"],["class","form-group row",4,"ngFor","ngForOf"],[1,"form-group","row"],[1,"cd-col-form-label",3,"for"],[1,"input-group"],[3,"ngSwitch"],[4,"ngSwitchCase"],["type","button","data-toggle","button","title",u,1,"btn","btn-light",3,"ngClass","click"],["class","invalid-feedback",4,"ngIf"],["type","text","cdMilliseconds","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","defaultUnit","b","cdDimlessBinaryPerSecond","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","cdIops","",1,"form-control",3,"id","name","formControlName","ngDataReady"],[1,"invalid-feedback"],f]},template:function(u,f){1&u&&(c.TgZ(0,"fieldset",0,1)(2,"legend"),c.SDv(3,2),c.qZA(),c.YNc(4,Z,7,7,"div",3),c.qZA()),2&u&&(c.Q6J("formGroup",f.form.get("configuration")),c.xp6(4),c.Q6J("ngForOf",f.rbdConfigurationService.sections))},dependencies:[le.mk,le.sg,le.O5,le.RF,le.n9,r.Fj,r.JJ,r.JL,r.sg,r.u,C.S,D,ie,V,e.o,X.b,Fe.P,De.V],styles:[".collapsible[_ngcontent-%COMP%]{cursor:pointer;user-select:none}"]}),h})()},42176:(mt,Ae,p)=>{p.d(Ae,{P:()=>H});var c=p(59019),r=p(30633),m=p(64537);let ne=(()=>{class N{transform(T){return{0:"global",1:"pool",2:"image"}[T]}}return N.\u0275fac=function(T){return new(T||N)},N.\u0275pipe=m.Yjl({name:"rbdConfigurationSource",type:N,pure:!0}),N})();var F=p(28211),Ne=p(34089),L=p(88692),j=p(20044),re=p(48537),Pe=p(21766);const le=["configurationSourceTpl"],C=["configurationValueTpl"],ce=["poolConfTable"];function D(N,x){1&N&&(m.TgZ(0,"span"),m.SDv(1,6),m.qZA())}function ie(N,x){1&N&&(m.TgZ(0,"strong"),m.SDv(1,7),m.qZA())}function V(N,x){1&N&&(m.TgZ(0,"strong"),m.SDv(1,8),m.qZA())}function e(N,x){1&N&&(m.TgZ(0,"div",4),m.YNc(1,D,2,0,"span",5),m.YNc(2,ie,2,0,"strong",5),m.YNc(3,V,2,0,"strong",5),m.qZA()),2&N&&(m.Q6J("ngSwitch",x.value),m.xp6(1),m.Q6J("ngSwitchCase","global"),m.xp6(1),m.Q6J("ngSwitchCase","image"),m.xp6(1),m.Q6J("ngSwitchCase","pool"))}function X(N,x){if(1&N&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"dimlessBinaryPerSecond"),m.qZA()),2&N){const T=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,T))}}function Fe(N,x){if(1&N&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"milliseconds"),m.qZA()),2&N){const T=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,T))}}function De(N,x){if(1&N&&(m.TgZ(0,"span"),m._uU(1),m.ALo(2,"iops"),m.qZA()),2&N){const T=m.oxw().value;m.xp6(1),m.Oqu(m.lcZ(2,1,T))}}function v(N,x){if(1&N&&(m.TgZ(0,"span"),m._uU(1),m.qZA()),2&N){const T=m.oxw().value;m.xp6(1),m.Oqu(T)}}function be(N,x){if(1&N&&(m.TgZ(0,"div",4),m.YNc(1,X,3,3,"span",5),m.YNc(2,Fe,3,3,"span",5),m.YNc(3,De,3,3,"span",5),m.YNc(4,v,2,1,"span",9),m.qZA()),2&N){const T=x.row,k=m.oxw();m.Q6J("ngSwitch",T.type),m.xp6(1),m.Q6J("ngSwitchCase",k.typeField.bps),m.xp6(1),m.Q6J("ngSwitchCase",k.typeField.milliseconds),m.xp6(1),m.Q6J("ngSwitchCase",k.typeField.iops)}}let H=(()=>{class N{constructor(T,k){this.formatterService=T,this.rbdConfigurationService=k,this.sourceField=r.h,this.typeField=r.r}ngOnInit(){this.poolConfigurationColumns=[{prop:"displayName",name:"Name"},{prop:"description",name:"Description"},{prop:"name",name:"Key"},{prop:"source",name:"Source",cellTemplate:this.configurationSourceTpl,pipe:new 
ne},{prop:"value",name:"Value",cellTemplate:this.configurationValueTpl}]}ngOnChanges(){this.data&&(this.data=this.data.filter(T=>this.rbdConfigurationService.getOptionFields().map(k=>k.name).includes(T.name)))}}return N.\u0275fac=function(T){return new(T||N)(m.Y36(F.H),m.Y36(Ne.n))},N.\u0275cmp=m.Xpm({type:N,selectors:[["cd-rbd-configuration-table"]],viewQuery:function(T,k){if(1&T&&(m.Gf(le,7),m.Gf(C,7),m.Gf(ce,7)),2&T){let Z;m.iGM(Z=m.CRH())&&(k.configurationSourceTpl=Z.first),m.iGM(Z=m.CRH())&&(k.configurationValueTpl=Z.first),m.iGM(Z=m.CRH())&&(k.poolConfTable=Z.first)}},inputs:{data:"data"},features:[m.TTD],decls:6,vars:2,consts:function(){let x,T,k;return x="Global",T="Image",k="Pool",[["identifier","name",3,"data","columns"],["poolConfTable",""],["configurationSourceTpl",""],["configurationValueTpl",""],[3,"ngSwitch"],[4,"ngSwitchCase"],x,T,k,[4,"ngSwitchDefault"]]},template:function(T,k){1&T&&(m._UZ(0,"cd-table",0,1),m.YNc(2,e,4,4,"ng-template",null,2,m.W1O),m.YNc(4,be,5,4,"ng-template",null,3,m.W1O)),2&T&&m.Q6J("data",k.data)("columns",k.poolConfigurationColumns)},dependencies:[L.RF,L.n9,L.ED,c.a,j.O,re.J,Pe.A]}),N})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt index e76e29bef..0815759ea 100644 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/3rdpartylicenses.txt @@ -378,6 +378,9 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +@swagger-api/apidom-reference +Apache-2.0 + @swimlane/ngx-datatable MIT (The MIT License) @@ -739,31 +742,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -cross-fetch -MIT -The MIT License (MIT) - -Copyright (c) 2017 Leonardo Quixadá - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - css.escape MIT Copyright Mathias Bynens @@ -1315,31 +1293,6 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI format MIT -formdata-node -MIT -The MIT License (MIT) - -Copyright (c) 2017-present Nick K. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - function-bind MIT Copyright (c) 2013 Raynos. @@ -1550,7 +1503,7 @@ immutable MIT MIT License -Copyright (c) 2014-present, Facebook, Inc. +Copyright (c) 2014-present, Lee Byron and other contributors. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -2116,6 +2069,32 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +process +MIT +(The MIT License) + +Copyright (c) 2013 Roman Shtylman + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + prop-types MIT MIT License @@ -2141,30 +2120,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -punycode -MIT -Copyright Mathias Bynens - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - qs BSD-3-Clause BSD 3-Clause License @@ -2198,34 +2153,37 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -querystring +querystringify MIT +The MIT License (MIT) + +Copyright (c) 2015 Unshift.io, Arnout Kazemier, the Contributors. -Copyright 2012 Irakli Gozalishvili. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -querystringify + +ramda MIT The MIT License (MIT) -Copyright (c) 2015 Unshift.io, Arnout Kazemier, the Contributors. +Copyright (c) 2013-2023 Scott Sauyet and Michael Hurley Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -2234,17 +2192,45 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +ramda-adjunct +BSD-3-Clause +BSD 3-Clause License +Copyright 2017-2019 Vladimír Gorej and the Ramda Adjunct contributors + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used + to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. randexp @@ -3454,31 +3440,6 @@ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -url -MIT -The MIT License (MIT) - -Copyright Joyent, Inc. and other Node contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - url-parse MIT The MIT License (MIT) diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/43.cf51dac96ed4b14e.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/43.cf51dac96ed4b14e.js deleted file mode 100644 index db0c092cd..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/43.cf51dac96ed4b14e.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[43],{21043:(ut,he,p)=>{p.r(he),p.d(he,{BlockModule:()=>Gt,RoutedBlockModule:()=>Cr});var c=p(11048),a=p(1659),g=p(55860),ne=p(62946),L=p(71334),be=p(37496),D=p(79512),U=p(4222),re=p(54462),Ae=p(44466),Ne=p(23815),C=p.n(Ne),le=p(35758),F=p(64762),ie=p(19725),Y=p(93523),e=p(89724);let X=class{constructor(s){this.http=s}listTargets(){return this.http.get("api/iscsi/target")}getTarget(s){return this.http.get(`api/iscsi/target/${s}`)}updateTarget(s,t){return this.http.put(`api/iscsi/target/${s}`,t,{observe:"response"})}status(){return this.http.get("ui-api/iscsi/status")}settings(){return this.http.get("ui-api/iscsi/settings")}version(){return this.http.get("ui-api/iscsi/version")}portals(){return this.http.get("ui-api/iscsi/portals")}createTarget(s){return this.http.post("api/iscsi/target",s,{observe:"response"})}deleteTarget(s){return this.http.delete(`api/iscsi/target/${s}`,{observe:"response"})}getDiscovery(){return this.http.get("api/iscsi/discoveryauth")}updateDiscovery(s){return this.http.put("api/iscsi/discoveryauth",s)}overview(){return this.http.get("ui-api/iscsi/overview")}};X.\u0275fac=function(s){return new(s||X)(e.LFG(ie.eN))},X.\u0275prov=e.Yz7({token:X,factory:X.\u0275fac,providedIn:"root"}),X=(0,F.gn)([Y.o,(0,F.w6)("design:paramtypes",[ie.eN])],X);var Fe=p(88002),De=p(76189),v=p(19358),Pe=p(34089);let H=class extends De.S{constructor(s,t){super(),this.http=s,this.rbdConfigurationService=t}isRBDPool(s){return-1!==C().indexOf(s.application_metadata,"rbd")&&!s.pool_name.includes("/")}create(s){return this.http.post("api/block/image",s,{observe:"response"})}delete(s){return this.http.delete(`api/block/image/${s.toStringEncoded()}`,{observe:"response"})}update(s,t){return this.http.put(`api/block/image/${s.toStringEncoded()}`,t,{observe:"response"})}get(s){return this.http.get(`api/block/image/${s.toStringEncoded()}`)}list(s){return this.http.get("api/block/image",{params:s,headers:{Accept:this.getVersionHeaderValue(2,0)},observe:"response"}).pipe((0,Fe.U)(t=>t.body.map(o=>(o.value.map(i=>(i.configuration&&i.configuration.map(_=>Object.assign(_,this.rbdConfigurationService.getOptionByName(_.name))),i)),o.headers=t.headers,o))))}copy(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/copy`,t,{observe:"response"})}flatten(s){return this.http.post(`api/block/image/${s.toStringEncoded()}/flatten`,null,{observe:"response"})}defaultFeatures(){return this.http.get("api/block/image/default_features")}cloneFormatVersion(){return this.http.get("api/block/image/clone_format_version")}createSnapshot(s,t,o){const i={snapshot_name:t,mirrorImageSnapshot:o};return this.http.post(`api/block/image/${s.toStringEncoded()}/snap`,i,{observe:"response"})}renameSnapshot(s,t,o){const i={new_snap_name:o};return this.http.put(`api/block/image/${s.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}protectSnapshot(s,t,o){const i={is_protected:o};return this.http.put(`api/block/image/${s.toStringEncoded()}/snap/${t}`,i,{observe:"response"})}rollbackSnapshot(s,t){return 
this.http.post(`api/block/image/${s.toStringEncoded()}/snap/${t}/rollback`,null,{observe:"response"})}cloneSnapshot(s,t,o){return this.http.post(`api/block/image/${s.toStringEncoded()}/snap/${t}/clone`,o,{observe:"response"})}deleteSnapshot(s,t){return this.http.delete(`api/block/image/${s.toStringEncoded()}/snap/${t}`,{observe:"response"})}listTrash(){return this.http.get("api/block/image/trash/")}createNamespace(s,t){return this.http.post(`api/block/pool/${s}/namespace`,{namespace:t},{observe:"response"})}listNamespaces(s){return this.http.get(`api/block/pool/${s}/namespace/`)}deleteNamespace(s,t){return this.http.delete(`api/block/pool/${s}/namespace/${t}`,{observe:"response"})}moveTrash(s,t){return this.http.post(`api/block/image/${s.toStringEncoded()}/move_trash`,{delay:t},{observe:"response"})}purgeTrash(s){return this.http.post(`api/block/image/trash/purge/?pool_name=${s}`,null,{observe:"response"})}restoreTrash(s,t){return this.http.post(`api/block/image/trash/${s.toStringEncoded()}/restore`,{new_image_name:t},{observe:"response"})}removeTrash(s,t=!1){return this.http.delete(`api/block/image/trash/${s.toStringEncoded()}/?force=${t}`,{observe:"response"})}};H.\u0275fac=function(s){return new(s||H)(e.LFG(ie.eN),e.LFG(Pe.n))},H.\u0275prov=e.Yz7({token:H,factory:H.\u0275fac,providedIn:"root"}),(0,F.gn)([(0,F.fM)(1,Y.G),(0,F.w6)("design:type",Function),(0,F.w6)("design:paramtypes",[v.N,String,Boolean]),(0,F.w6)("design:returntype",void 0)],H.prototype,"createSnapshot",null),(0,F.gn)([(0,F.fM)(2,Y.G),(0,F.w6)("design:type",Function),(0,F.w6)("design:paramtypes",[v.N,String,String]),(0,F.w6)("design:returntype",void 0)],H.prototype,"renameSnapshot",null),(0,F.gn)([(0,F.fM)(2,Y.G),(0,F.w6)("design:type",Function),(0,F.w6)("design:paramtypes",[v.N,String,Boolean]),(0,F.w6)("design:returntype",void 0)],H.prototype,"protectSnapshot",null),(0,F.gn)([(0,F.fM)(1,Y.G),(0,F.w6)("design:type",Function),(0,F.w6)("design:paramtypes",[v.N,String]),(0,F.w6)("design:returntype",void 0)],H.prototype,"restoreTrash",null),H=(0,F.gn)([Y.o,(0,F.w6)("design:paramtypes",[ie.eN,Pe.n])],H);var N=p(7022),x=p(14745),T=p(65862),k=p(93614),Z=p(95463),z=p(90070),A=p(48168),M=p(76111),m=p(32337),f=p(60312),P=p(41582),h=p(56310),$=p(87925),K=p(94276);function no(n,s){if(1&n&&(e.TgZ(0,"option",6),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("ngValue",t),e.xp6(1),e.Oqu(t)}}function io(n,s){if(1&n&&(e.TgZ(0,"select",5),e._UZ(1,"option",6),e.YNc(2,no,2,2,"option",7),e.qZA()),2&n){const t=e.oxw();e.s9C("id",t.setting),e.s9C("name",t.setting),e.Q6J("formControlName",t.setting),e.xp6(1),e.Q6J("ngValue",null),e.xp6(1),e.Q6J("ngForOf",t.limits.values)}}function so(n,s){if(1&n&&e._UZ(0,"input",10),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function _o(n,s){if(1&n&&e._UZ(0,"input",11),2&n){const t=e.oxw(2);e.Q6J("formControlName",t.setting)}}function ao(n,s){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"div",12),e._UZ(3,"input",13),e.TgZ(4,"label",14),e._uU(5,"Yes"),e.qZA()(),e.TgZ(6,"div",12),e._UZ(7,"input",13),e.TgZ(8,"label",14),e._uU(9,"No"),e.qZA()(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(3),e.Q6J("id",t.setting+"True")("value",!0)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"True"),e.xp6(3),e.Q6J("id",t.setting+"False")("value",!1)("formControlName",t.setting),e.xp6(1),e.Q6J("for",t.setting+"False")}}function ro(n,s){if(1&n&&(e.TgZ(0,"span"),e.YNc(1,so,1,1,"input",8),e.YNc(2,_o,1,1,"input",9),e.YNc(3,ao,10,8,"ng-container",3),e.qZA()),2&n){const 
t=e.oxw();e.xp6(1),e.Q6J("ngIf","int"===t.limits.type),e.xp6(1),e.Q6J("ngIf","str"===t.limits.type),e.xp6(1),e.Q6J("ngIf","bool"===t.limits.type)}}function lo(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,16),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.min),e.QtT(2)}}function co(n,s){if(1&n&&(e.TgZ(0,"span",15),e.ynx(1),e.SDv(2,17),e.BQk(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.limits.max),e.QtT(2)}}let mt=(()=>{class n{ngOnInit(){const t=[];"min"in this.limits&&t.push(a.kI.min(this.limits.min)),"max"in this.limits&&t.push(a.kI.max(this.limits.max)),this.settingsForm.get(this.setting).setValidators(t)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-setting"]],inputs:{settingsForm:"settingsForm",formDir:"formDir",setting:"setting",limits:"limits"},decls:7,vars:7,consts:function(){let s,t;return s="Must be greater than or equal to " + "\ufffd0\ufffd" + ".",t="Must be less than or equal to " + "\ufffd0\ufffd" + ".",[[1,"form-group",3,"formGroup"],[1,"col-form-label",3,"for"],["class","form-control",3,"id","name","formControlName",4,"ngIf"],[4,"ngIf"],["class","invalid-feedback",4,"ngIf"],[1,"form-control",3,"id","name","formControlName"],[3,"ngValue"],[3,"ngValue",4,"ngFor","ngForOf"],["type","number","class","form-control",3,"formControlName",4,"ngIf"],["type","text","class","form-control",3,"formControlName",4,"ngIf"],["type","number",1,"form-control",3,"formControlName"],["type","text",1,"form-control",3,"formControlName"],[1,"custom-control","custom-radio","custom-control-inline"],["type","radio",1,"custom-control-input",3,"id","value","formControlName"],[1,"custom-control-label",3,"for"],[1,"invalid-feedback"],s,t]},template:function(t,o){1&t&&(e.TgZ(0,"div",0)(1,"label",1),e._uU(2),e.qZA(),e.YNc(3,io,3,5,"select",2),e.YNc(4,ro,4,3,"span",3),e.YNc(5,lo,3,1,"span",4),e.YNc(6,co,3,1,"span",4),e.qZA()),2&t&&(e.Q6J("formGroup",o.settingsForm),e.xp6(1),e.s9C("for",o.setting),e.xp6(1),e.Oqu(o.setting),e.xp6(1),e.Q6J("ngIf","enum"===o.limits.type),e.xp6(1),e.Q6J("ngIf","enum"!==o.limits.type),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"min")),e.xp6(1),e.Q6J("ngIf",o.settingsForm.showError(o.setting,o.formDir,"max")))},directives:[h.P,a.JL,a.sg,P.V,c.O5,$.o,a.EJ,K.b,a.JJ,a.u,a.YN,a.Kr,c.sg,a.wV,a.Fj,a._],styles:[""]}),n})();var j=p(30839),Je=p(88820);function po(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function uo(n,s){if(1&n&&(e.TgZ(0,"span")(1,"legend",10),e.SDv(2,21),e.qZA(),e.TgZ(3,"div",12)(4,"div",13)(5,"label",22),e.SDv(6,23),e.qZA(),e._UZ(7,"input",24),e.YNc(8,po,2,0,"span",25),e.qZA()(),e.TgZ(9,"div",12)(10,"div",13)(11,"label",26),e.SDv(12,27),e.qZA(),e._UZ(13,"input",28),e.qZA()()()),2&n){const t=e.oxw(),o=e.MAs(9);e.xp6(8),e.Q6J("ngIf",t.settingsForm.showError("lun",o,"required"))}}function mo(n,s){if(1&n&&(e.TgZ(0,"option",31),e._uU(1),e.ALo(2,"iscsiBackstore"),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(e.lcZ(2,2,t))}}function go(n,s){if(1&n&&(e.TgZ(0,"div",12)(1,"div",13),e._UZ(2,"cd-iscsi-setting",33),e.qZA()()),2&n){const t=s.$implicit,o=e.oxw(2).$implicit,i=e.oxw(),_=e.MAs(9);e.xp6(2),e.Q6J("settingsForm",i.settingsForm)("formDir",_)("setting",t.key)("limits",i.getDiskControlLimits(o,t.key))}}function To(n,s){if(1&n&&(e.ynx(0),e.YNc(1,go,3,4,"div",32),e.ALo(2,"keyvalue"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngForOf",e.lcZ(2,1,o.disk_default_controls[t]))}}function 
fo(n,s){if(1&n&&(e.ynx(0),e.YNc(1,To,3,3,"ng-container",9),e.BQk()),2&n){const t=s.$implicit,o=e.oxw();e.xp6(1),e.Q6J("ngIf",o.settingsForm.value.backstore===t)}}let Co=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={backstore:new a.NI(this.imagesSettings[this.image].backstore),lun:new a.NI(this.imagesSettings[this.image].lun),wwn:new a.NI(this.imagesSettings[this.image].wwn)};C().forEach(this.backstores,o=>{const i=this.imagesSettings[this.image][o]||{};C().forIn(this.disk_default_controls[o],(_,r)=>{t[r]=new a.NI(i[r])})}),this.settingsForm=new Z.d(t)}getDiskControlLimits(t,o){return this.disk_controls_limits?this.disk_controls_limits[t][o]:{type:"int"}}save(){const t=this.settingsForm.controls.backstore.value,o=this.settingsForm.controls.lun.value,i=this.settingsForm.controls.wwn.value,_={};C().forIn(this.settingsForm.controls,(r,l)=>{""!==r.value&&null!==r.value&&l in this.disk_default_controls[this.settingsForm.value.backstore]&&(_[l]=r.value,C().forEach(this.backstores,d=>{d!==t&&l in(this.imagesSettings[this.image][d]||{})&&(this.imagesSettings[this.image][d][l]=r.value)}))}),this.imagesSettings[this.image].backstore=t,this.imagesSettings[this.image].lun=o,this.imagesSettings[this.image].wwn=i,this.imagesSettings[this.image][t]=_,this.imagesSettings=Object.assign({},this.imagesSettings),this.control.updateValueAndValidity({emitEvent:!1}),this.activeModal.close()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(X),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-image-settings-modal"]],decls:25,vars:8,consts:function(){let s,t,o,i,_,r,l,d;return s="Configure",t="Changing these parameters from their default values is usually not necessary.",o="Settings",i="Backstore",_="Identifier",r="lun",l="wwn",d="This field is required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","settingsForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,[4,"ngIf"],[1,"cd-header"],o,[1,"form-group","row"],[1,"col-sm-12"],[1,"col-form-label"],i,["id","backstore","name","backstore","formControlName","backstore",1,"form-select"],[3,"value",4,"ngFor","ngForOf"],[4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],_,["for","lun",1,"col-form-label","required"],r,["type","number","id","lun","name","lun","formControlName","lun",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","wwn",1,"col-form-label"],l,["type","text","id","wwn","name","wwn","formControlName","wwn",1,"form-control"],[1,"invalid-feedback"],d,[3,"value"],["class","form-group row",4,"ngFor","ngForOf"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1)(2),e.SDv(3,2),e.BQk(),e._uU(4,"\xa0 "),e.TgZ(5,"small"),e._uU(6),e.qZA(),e.BQk(),e.ynx(7,3),e.TgZ(8,"form",4,5)(10,"div",6)(11,"p",7),e.SDv(12,8),e.qZA(),e.YNc(13,uo,14,1,"span",9),e.TgZ(14,"legend",10),e.SDv(15,11),e.qZA(),e.TgZ(16,"div",12)(17,"div",13)(18,"label",14),e.SDv(19,15),e.qZA(),e.TgZ(20,"select",16),e.YNc(21,mo,3,4,"option",17),e.qZA()()(),e.YNc(22,fo,2,1,"ng-container",18),e.qZA(),e.TgZ(23,"div",19)(24,"cd-form-button-panel",20),e.NdJ("submitActionEvent",function(){return 
o.save()}),e.qZA()()(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(6),e.Oqu(o.image),e.xp6(2),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngIf",o.api_version>=1),e.xp6(8),e.Q6J("ngForOf",o.backstores),e.xp6(1),e.Q6J("ngForOf",o.backstores),e.xp6(2),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},directives:[f.z,a._Y,a.JL,a.sg,P.V,c.O5,h.P,$.o,a.wV,a.Fj,K.b,a.JJ,a.u,a.EJ,c.sg,a.YN,a.Kr,mt,j.p],pipes:[Je.V,c.Nd],styles:[""]}),n})();function So(n,s){if(1&n&&(e.TgZ(0,"div",12)(1,"div",13),e._UZ(2,"cd-iscsi-setting",14),e.qZA()()),2&n){const t=s.$implicit,o=e.oxw(),i=e.MAs(5);e.xp6(2),e.Q6J("settingsForm",o.settingsForm)("formDir",i)("setting",t.key)("limits",o.getTargetControlLimits(t.key))}}let Ro=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.iscsiService=o,this.actionLabels=i}ngOnInit(){const t={};C().forIn(this.target_default_controls,(o,i)=>{t[i]=new a.NI(this.target_controls.value[i])}),this.settingsForm=new Z.d(t)}save(){const t={};C().forIn(this.settingsForm.controls,(o,i)=>{""===o.value||null===o.value||(t[i]=o.value)}),this.target_controls.setValue(t),this.activeModal.close()}getTargetControlLimits(t){return this.target_controls_limits?this.target_controls_limits[t]:["Yes","No"].includes(this.target_default_controls[t])?{type:"bool"}:{type:"int"}}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(X),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-iqn-settings-modal"]],decls:13,vars:7,consts:function(){let s,t;return s="Advanced Settings",t="Changing these parameters from their default values is usually not necessary.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","settingsForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"alert-warning"],t,["class","form-group row",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"form-group","row"],[1,"col-sm-12"],[3,"settingsForm","formDir","setting","limits"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p",7),e.SDv(8,8),e.qZA(),e.YNc(9,So,3,4,"div",9),e.ALo(10,"keyvalue"),e.qZA(),e.TgZ(11,"div",10)(12,"cd-form-button-panel",11),e.NdJ("submitActionEvent",function(){return o.save()}),e.qZA()()(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.settingsForm),e.xp6(5),e.Q6J("ngForOf",e.lcZ(10,5,o.settingsForm.controls)),e.xp6(3),e.Q6J("form",o.settingsForm)("submitText",o.actionLabels.UPDATE))},directives:[f.z,a._Y,a.JL,a.sg,P.V,c.sg,h.P,mt,j.p],pipes:[c.Nd],styles:[""]}),n})();var de=p(63285),gt=p(63622);let Eo=(()=>{class n{constructor(t){this.ngControl=t}onInput(t){this.setValue(t)}setValue(t){t=C().isString(t)?t.trim():t,this.ngControl.control.setValue(t)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(a.a5))},n.\u0275dir=e.lG2({type:n,selectors:[["","cdTrim",""]],hostBindings:function(t,o){1&t&&e.NdJ("input",function(_){return o.onInput(_.target.value)})}}),n})();var Mo=p(39092),Tt=p(4416),Ye=p(58039),tt=p(10545);function Oo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,41),e.qZA())}function ho(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,42),e.qZA())}function Ao(n,s){1&n&&(e.TgZ(0,"span",40),e.ynx(1),e.SDv(2,43),e.BQk(),e._UZ(3,"br"),e.ynx(4),e.SDv(5,44),e.BQk(),e._UZ(6,"br"),e.TgZ(7,"a",45),e.SDv(8,46),e.qZA()())}function Po(n,s){1&n&&(e.TgZ(0,"span",47),e.SDv(1,48),e.qZA())}const V=function(n){return[n]};function Io(n,s){if(1&n){const 
t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,r=i.$implicit;return e.oxw(2).removePortal(_,r)}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,V,o.icons.destroy))}}function bo(n,s){if(1&n&&(e.TgZ(0,"span",40),e.SDv(1,52),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.pQV(t.minimum_gateways),e.QtT(1)}}function No(n,s){if(1&n&&(e.TgZ(0,"div",55),e._uU(1),e.qZA()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(1),e.hij("lun: ",o.imagesSettings[t].lun,"")}}function Fo(n,s){if(1&n&&(e.ynx(0),e.SDv(1,56),e.ALo(2,"iscsiBackstore"),e.BQk()),2&n){const t=e.oxw().$implicit,o=e.oxw(2);e.xp6(2),e.pQV(e.lcZ(2,1,o.imagesSettings[t].backstore)),e.QtT(1)}}function Do(n,s){1&n&&(e.ynx(0),e.SDv(1,57),e.BQk())}function Lo(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.YNc(3,No,2,1,"div",53),e.TgZ(4,"button",51),e.NdJ("click",function(){const _=e.CHM(t).$implicit;return e.oxw(2).imageSettingsModal(_)}),e._UZ(5,"i",15),e.qZA(),e.TgZ(6,"button",51),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,r=i.$implicit;return e.oxw(2).removeImage(_,r)}),e._UZ(7,"i",15),e.qZA()(),e.TgZ(8,"span",47),e.YNc(9,Fo,3,3,"ng-container",54),e.YNc(10,Do,2,0,"ng-container",54),e.qZA(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(2);e.xp6(2),e.Q6J("value",t),e.xp6(1),e.Q6J("ngIf",o.api_version>=1),e.xp6(2),e.Q6J("ngClass",e.VKq(6,V,o.icons.deepCheck)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,V,o.icons.destroy)),e.xp6(2),e.Q6J("ngIf",o.backstores.length>1),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.imagesSettings[t][o.imagesSettings[t].backstore]))}}function vo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,58),e.qZA())}function $o(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,59),e.qZA())}function Bo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,77),e.qZA())}function Go(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,78),e.qZA())}function yo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,79),e.qZA())}function xo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,80),e.qZA())}function Zo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,81),e.qZA())}function wo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,82),e.qZA())}function Ho(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,83),e.qZA())}function ko(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,84),e.qZA())}function qo(n,s){if(1&n&&(e.TgZ(0,"div",60)(1,"div",8)(2,"label",61),e.ynx(3),e.SDv(4,62),e.BQk(),e.qZA(),e.TgZ(5,"div",11),e._UZ(6,"input",63),e.YNc(7,Bo,2,0,"span",16),e.YNc(8,Go,2,0,"span",16),e.qZA()(),e.TgZ(9,"div",8)(10,"label",64),e.ynx(11),e.SDv(12,65),e.BQk(),e.qZA(),e.TgZ(13,"div",11)(14,"div",12),e._UZ(15,"input",66)(16,"button",67)(17,"cd-copy-2-clipboard-button",68),e.qZA(),e.YNc(18,yo,2,0,"span",16),e.YNc(19,xo,2,0,"span",16),e.qZA()(),e.TgZ(20,"div",8)(21,"label",69),e.ynx(22),e.SDv(23,70),e.BQk(),e.qZA(),e.TgZ(24,"div",11),e._UZ(25,"input",71),e.YNc(26,Zo,2,0,"span",16),e.YNc(27,wo,2,0,"span",16),e.qZA()(),e.TgZ(28,"div",8)(29,"label",72),e.ynx(30),e.SDv(31,73),e.BQk(),e.qZA(),e.TgZ(32,"div",11)(33,"div",12),e._UZ(34,"input",74)(35,"button",75)(36,"cd-copy-2-clipboard-button",76),e.qZA(),e.YNc(37,Ho,2,0,"span",16),e.YNc(38,ko,2,0,"span",16),e.qZA()()()),2&n){e.oxw();const 
t=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("user",t,"pattern")),e.xp6(10),e.Q6J("ngIf",o.targetForm.showError("password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("password",t,"pattern")),e.xp6(7),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_user",t,"pattern")),e.xp6(10),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("mutual_password",t,"pattern"))}}function Ko(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,114),e.qZA())}function Xo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,115),e.qZA())}function Qo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,116),e.qZA())}function zo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,117),e.qZA())}function Jo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,118),e.qZA())}function Yo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,119),e.qZA())}function Vo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,120),e.qZA())}function Uo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,121),e.qZA())}function jo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,122),e.qZA())}function Wo(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,123),e.qZA())}function en(n,s){1&n&&(e.TgZ(0,"span",40),e.SDv(1,124),e.qZA())}function tn(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const i=e.CHM(t),_=i.index,r=i.$implicit,l=e.oxw(),d=l.$implicit,u=l.index;return e.oxw(3).removeInitiatorImage(d,_,u,r)}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,V,o.icons.destroy))}}function on(n,s){1&n&&(e.TgZ(0,"span"),e.SDv(1,125),e.qZA())}function nn(n,s){if(1&n&&(e.TgZ(0,"div",21)(1,"div",22)(2,"cd-select",126),e._UZ(3,"i",24),e.ynx(4),e.SDv(5,127),e.BQk(),e.qZA()()()),2&n){const t=e.oxw(),o=t.$implicit,i=t.index,_=e.oxw(3);e.xp6(2),e.Q6J("data",o.getValue("luns"))("options",_.imagesInitiatorSelections[i])("messages",_.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(4,V,_.icons.add))}}function sn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",91)(1,"div",5),e.ynx(2),e.SDv(3,92),e.BQk(),e._uU(4),e.TgZ(5,"button",93),e.NdJ("click",function(){const _=e.CHM(t).index;return e.oxw(3).removeInitiator(_)}),e.qZA()(),e.TgZ(6,"div",7)(7,"div",8)(8,"label",94),e.SDv(9,95),e.qZA(),e.TgZ(10,"div",11)(11,"input",96),e.NdJ("blur",function(){return 
e.CHM(t),e.oxw(3).updatedInitiatorSelector()}),e.qZA(),e.YNc(12,Ko,2,0,"span",16),e.YNc(13,Xo,2,0,"span",16),e.YNc(14,Qo,2,0,"span",16),e.qZA()(),e.ynx(15,60),e.TgZ(16,"div",8)(17,"label",97),e.SDv(18,98),e.qZA(),e.TgZ(19,"div",11),e._UZ(20,"input",99),e.YNc(21,zo,2,0,"span",16),e.YNc(22,Jo,2,0,"span",16),e.qZA()(),e.TgZ(23,"div",8)(24,"label",100),e.SDv(25,101),e.qZA(),e.TgZ(26,"div",11)(27,"div",12),e._UZ(28,"input",102)(29,"button",103)(30,"cd-copy-2-clipboard-button",104),e.qZA(),e.YNc(31,Yo,2,0,"span",16),e.YNc(32,Vo,2,0,"span",16),e.qZA()(),e.TgZ(33,"div",8)(34,"label",105),e.ynx(35),e.SDv(36,106),e.BQk(),e.qZA(),e.TgZ(37,"div",11),e._UZ(38,"input",107),e.YNc(39,Uo,2,0,"span",16),e.YNc(40,jo,2,0,"span",16),e.qZA()(),e.TgZ(41,"div",8)(42,"label",108),e.SDv(43,109),e.qZA(),e.TgZ(44,"div",11)(45,"div",12),e._UZ(46,"input",110)(47,"button",103)(48,"cd-copy-2-clipboard-button",104),e.qZA(),e.YNc(49,Wo,2,0,"span",16),e.YNc(50,en,2,0,"span",16),e.qZA()(),e.BQk(),e.TgZ(51,"div",8)(52,"label",111),e.SDv(53,112),e.qZA(),e.TgZ(54,"div",11),e.YNc(55,tn,5,4,"ng-container",20),e.YNc(56,on,2,0,"span",54),e.YNc(57,nn,6,6,"div",113),e.qZA()()()()}if(2&n){const t=s.$implicit,o=s.index;e.oxw(2);const i=e.MAs(2);e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("client_iqn")," "),e.xp6(8),e.Q6J("ngIf",t.showError("client_iqn",i,"notUnique")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("client_iqn",i,"pattern")),e.xp6(6),e.Q6J("id","user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("user",i,"pattern")),e.xp6(6),e.Q6J("id","password"+o),e.xp6(1),e.Q6J("cdPasswordButton","password"+o),e.xp6(1),e.Q6J("source","password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("password",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_user"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_user",i,"pattern")),e.xp6(6),e.Q6J("id","mutual_password"+o),e.xp6(1),e.Q6J("cdPasswordButton","mutual_password"+o),e.xp6(1),e.Q6J("source","mutual_password"+o),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",t.showError("mutual_password",i,"pattern")),e.xp6(5),e.Q6J("ngForOf",t.getValue("luns")),e.xp6(1),e.Q6J("ngIf",t.getValue("cdIsInGroup")),e.xp6(1),e.Q6J("ngIf",!t.getValue("cdIsInGroup"))}}function _n(n,s){1&n&&(e.TgZ(0,"span",47),e.SDv(1,128),e.qZA())}function an(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",8)(1,"label",85),e.SDv(2,86),e.qZA(),e.TgZ(3,"div",87),e.YNc(4,sn,58,24,"div",88),e.TgZ(5,"div",21)(6,"div",22),e.YNc(7,_n,2,0,"span",17),e.TgZ(8,"button",89),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addInitiator(),!1}),e._UZ(9,"i",24),e.ynx(10),e.SDv(11,90),e.BQk(),e.qZA()()(),e._UZ(12,"hr"),e.qZA()()}if(2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.initiators.controls),e.xp6(3),e.Q6J("ngIf",0===t.initiators.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,V,t.icons.add))}}function rn(n,s){if(1&n){const t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const _=e.CHM(t).index,r=e.oxw(),l=r.$implicit,d=r.index;return e.oxw(3).removeGroupInitiator(l,_,d)}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,V,o.icons.destroy))}}function ln(n,s){if(1&n){const 
t=e.EpF();e.ynx(0),e.TgZ(1,"div",49),e._UZ(2,"input",50),e.TgZ(3,"button",51),e.NdJ("click",function(){const _=e.CHM(t).index,r=e.oxw(),l=r.$implicit,d=r.index;return e.oxw(3).removeGroupDisk(l,_,d)}),e._UZ(4,"i",15),e.qZA()(),e.BQk()}if(2&n){const t=s.$implicit,o=e.oxw(4);e.xp6(2),e.Q6J("value",t),e.xp6(2),e.Q6J("ngClass",e.VKq(2,V,o.icons.destroy))}}function cn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",91)(1,"div",5),e.ynx(2),e.SDv(3,132),e.BQk(),e._uU(4),e.TgZ(5,"button",93),e.NdJ("click",function(){const _=e.CHM(t).index;return e.oxw(3).removeGroup(_)}),e.qZA()(),e.TgZ(6,"div",7)(7,"div",8)(8,"label",133),e.SDv(9,134),e.qZA(),e.TgZ(10,"div",11),e._UZ(11,"input",135),e.qZA()(),e.TgZ(12,"div",8)(13,"label",136),e.ynx(14),e.SDv(15,137),e.BQk(),e.qZA(),e.TgZ(16,"div",11),e.YNc(17,rn,5,4,"ng-container",20),e.TgZ(18,"div",21)(19,"div",22)(20,"cd-select",23),e.NdJ("selection",function(i){const r=e.CHM(t).index;return e.oxw(3).onGroupMemberSelection(i,r)}),e._UZ(21,"i",24),e.ynx(22),e.SDv(23,138),e.BQk(),e.qZA()()(),e._UZ(24,"hr"),e.qZA()(),e.TgZ(25,"div",8)(26,"label",27),e.ynx(27),e.SDv(28,139),e.BQk(),e.qZA(),e.TgZ(29,"div",11),e.YNc(30,ln,5,4,"ng-container",20),e.TgZ(31,"div",21)(32,"div",22)(33,"cd-select",126),e._UZ(34,"i",24),e.ynx(35),e.SDv(36,140),e.BQk(),e.qZA()()(),e._UZ(37,"hr"),e.qZA()()()()}if(2&n){const t=s.$implicit,o=s.index,i=e.oxw(3);e.Q6J("formGroup",t),e.xp6(4),e.hij(": ",t.getValue("group_id")," "),e.xp6(13),e.Q6J("ngForOf",t.getValue("members")),e.xp6(3),e.Q6J("data",t.getValue("members"))("options",i.groupMembersSelections[o])("messages",i.messages.groupInitiator),e.xp6(1),e.Q6J("ngClass",e.VKq(12,V,i.icons.add)),e.xp6(9),e.Q6J("ngForOf",t.getValue("disks")),e.xp6(3),e.Q6J("data",t.getValue("disks"))("options",i.groupDiskSelections[o])("messages",i.messages.initiatorImage),e.xp6(1),e.Q6J("ngClass",e.VKq(14,V,i.icons.add))}}function dn(n,s){1&n&&(e.TgZ(0,"span",47),e.SDv(1,141),e.qZA())}function pn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",8)(1,"label",85),e.SDv(2,129),e.qZA(),e.TgZ(3,"div",130),e.YNc(4,cn,38,16,"div",88),e.TgZ(5,"div",21)(6,"div",22),e.YNc(7,dn,2,0,"span",17),e.TgZ(8,"button",89),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).addGroup(),!1}),e._UZ(9,"i",24),e.ynx(10),e.SDv(11,131),e.BQk(),e.qZA()()()()()}if(2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",t.groups.controls),e.xp6(3),e.Q6J("ngIf",0===t.groups.controls.length),e.xp6(2),e.Q6J("ngClass",e.VKq(3,V,t.icons.add))}}function un(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7)(9,"div",8)(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11)(13,"div",12),e._UZ(14,"input",13),e.TgZ(15,"button",14),e.NdJ("click",function(){return e.CHM(t),e.oxw().targetSettingsModal()}),e._UZ(16,"i",15),e.qZA()(),e.YNc(17,Oo,2,0,"span",16),e.YNc(18,ho,2,0,"span",16),e.YNc(19,Ao,9,0,"span",16),e.YNc(20,Po,2,0,"span",17),e._UZ(21,"hr"),e.qZA()(),e.TgZ(22,"div",8)(23,"label",18),e.SDv(24,19),e.qZA(),e.TgZ(25,"div",11),e.YNc(26,Io,5,4,"ng-container",20),e.TgZ(27,"div",21)(28,"div",22)(29,"cd-select",23),e.NdJ("selection",function(i){return 
e.CHM(t),e.oxw().onPortalSelection(i)}),e._UZ(30,"i",24),e.ynx(31),e.SDv(32,25),e.BQk(),e.qZA()()(),e._UZ(33,"input",26),e.YNc(34,bo,2,1,"span",16),e._UZ(35,"hr"),e.qZA()(),e.TgZ(36,"div",8)(37,"label",27),e.SDv(38,28),e.qZA(),e.TgZ(39,"div",11),e.YNc(40,Lo,11,10,"ng-container",20),e._UZ(41,"input",29),e.YNc(42,vo,2,0,"span",16),e.YNc(43,$o,2,0,"span",16),e.TgZ(44,"div",21)(45,"div",22)(46,"cd-select",23),e.NdJ("selection",function(i){return e.CHM(t),e.oxw().onImageSelection(i)}),e._UZ(47,"i",24),e.ynx(48),e.SDv(49,30),e.BQk(),e.qZA()()(),e._UZ(50,"hr"),e.qZA()(),e.TgZ(51,"div",8)(52,"div",31)(53,"div",32),e._UZ(54,"input",33),e.TgZ(55,"label",34),e.SDv(56,35),e.qZA()(),e._UZ(57,"hr"),e.qZA()(),e.YNc(58,qo,39,8,"div",36),e.YNc(59,an,13,5,"div",37),e.YNc(60,pn,12,5,"div",37),e.qZA(),e.TgZ(61,"div",38)(62,"cd-form-button-panel",39),e.NdJ("submitActionEvent",function(){return e.CHM(t),e.oxw().submit()}),e.ALo(63,"titlecase"),e.ALo(64,"upperFirst"),e.qZA()()()()()}if(2&n){const t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.targetForm),e.xp6(6),e.pQV(e.lcZ(6,26,o.action))(e.lcZ(7,28,o.resource)),e.QtT(5),e.xp6(9),e.Q6J("ngClass",e.VKq(34,V,o.icons.deepCheck)),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"required")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"pattern")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("target_iqn",t,"iqn")),e.xp6(1),e.Q6J("ngIf",o.hasAdvancedSettings(o.targetForm.getValue("target_controls"))),e.xp6(6),e.Q6J("ngForOf",o.portals.value),e.xp6(3),e.Q6J("data",o.portals.value)("options",o.portalsSelections)("messages",o.messages.portals),e.xp6(1),e.Q6J("ngClass",e.VKq(36,V,o.icons.add)),e.xp6(4),e.Q6J("ngIf",o.targetForm.showError("portals",t,"minGateways")),e.xp6(6),e.Q6J("ngForOf",o.targetForm.getValue("disks")),e.xp6(2),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupLunId")),e.xp6(1),e.Q6J("ngIf",o.targetForm.showError("disks",t,"dupWwn")),e.xp6(3),e.Q6J("data",o.disks.value)("options",o.imagesSelections)("messages",o.messages.images),e.xp6(1),e.Q6J("ngClass",e.VKq(38,V,o.icons.add)),e.xp6(11),e.Q6J("ngIf",o.cephIscsiConfigVersion>10&&!o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(1),e.Q6J("ngIf",o.targetForm.getValue("acl_enabled")),e.xp6(2),e.Q6J("form",o.targetForm)("submitText",e.lcZ(63,30,o.action)+" "+e.lcZ(64,32,o.resource))}}let ft=(()=>{class n extends k.E{constructor(t,o,i,_,r,l,d){super(),this.iscsiService=t,this.modalService=o,this.rbdService=i,this.router=_,this.route=r,this.taskWrapper=l,this.actionLabels=d,this.api_version=0,this.minimum_gateways=1,this.icons=T.P,this.isEdit=!1,this.portalsSelections=[],this.imagesInitiatorSelections=[],this.groupDiskSelections=[],this.groupMembersSelections=[],this.imagesSettings={},this.messages={portals:new N.a({noOptions:"There are no portals available."}),images:new N.a({noOptions:"There are no images available."}),initiatorImage:new N.a({noOptions:"There are no images available. Please make sure you add an image to the target."}),groupInitiator:new N.a({noOptions:"There are no initiators available. 
Please make sure you add an initiator to the target."})},this.IQN_REGEX=/^iqn\.(19|20)\d\d-(0[1-9]|1[0-2])\.\D{2,3}(\.[A-Za-z0-9-]+)+(:[A-Za-z0-9-\.]+)*$/,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.resource="target"}ngOnInit(){const t=new A.E(()=>{});t.pageInfo.limit=-1;const o=[this.iscsiService.listTargets(),this.rbdService.list(t.toParams()),this.iscsiService.portals(),this.iscsiService.settings(),this.iscsiService.version()];this.router.url.startsWith("/block/iscsi/targets/edit")&&(this.isEdit=!0,this.route.params.subscribe(i=>{this.target_iqn=decodeURIComponent(i.target_iqn),o.push(this.iscsiService.getTarget(this.target_iqn))})),this.action=this.isEdit?this.actionLabels.EDIT:this.actionLabels.CREATE,(0,le.D)(o).subscribe(i=>{const _=C()(i[0]).filter(l=>l.target_iqn!==this.target_iqn).flatMap(l=>l.disks).map(l=>`${l.pool}/${l.image}`).value();"api_version"in i[3]&&(this.api_version=i[3].api_version),this.minimum_gateways=i[3].config.minimum_gateways,this.target_default_controls=i[3].target_default_controls,this.target_controls_limits=i[3].target_controls_limits,this.disk_default_controls=i[3].disk_default_controls,this.disk_controls_limits=i[3].disk_controls_limits,this.backstores=i[3].backstores,this.default_backstore=i[3].default_backstore,this.unsupported_rbd_features=i[3].unsupported_rbd_features,this.required_rbd_features=i[3].required_rbd_features,this.imagesAll=C()(i[1]).flatMap(l=>l.value).filter(l=>!l.namespace&&!(-1!==_.indexOf(`${l.pool_name}/${l.name}`)||0===this.getValidBackstores(l).length)).value(),this.imagesSelections=this.imagesAll.map(l=>new x.$(!1,`${l.pool_name}/${l.name}`,""));const r=[];i[2].forEach(l=>{l.ip_addresses.forEach(d=>{r.push(new x.$(!1,l.name+":"+d,""))})}),this.portalsSelections=[...r],this.cephIscsiConfigVersion=i[4].ceph_iscsi_config_version,this.createForm(),i[5]&&this.resolveModel(i[5]),this.loadingReady()})}createForm(){if(this.targetForm=new Z.d({target_iqn:new a.NI("iqn.2001-07.com.ceph:"+Date.now(),{validators:[a.kI.required,a.kI.pattern(this.IQN_REGEX)]}),target_controls:new a.NI({}),portals:new a.NI([],{validators:[z.h.custom("minGateways",t=>C().uniq(t.map(i=>i.split(":")[0])).length{const o=this.getLunIds(t);return o.length!==C().uniq(o).length}),z.h.custom("dupWwn",t=>{const o=this.getWwns(t);return o.length!==C().uniq(o).length})]}),initiators:new a.Oe([]),groups:new a.Oe([]),acl_enabled:new a.NI(!1)}),this.cephIscsiConfigVersion>10){const t=new Z.d({user:new a.NI(""),password:new a.NI(""),mutual_user:new a.NI(""),mutual_password:new a.NI("")});this.setAuthValidator(t),this.targetForm.addControl("auth",t)}}resolveModel(t){this.targetForm.patchValue({target_iqn:t.target_iqn,target_controls:t.target_controls,acl_enabled:t.acl_enabled}),this.cephIscsiConfigVersion>10&&this.targetForm.patchValue({auth:t.auth});const o=[];C().forEach(t.portals,_=>{o.push(`${_.host}:${_.ip}`)}),this.targetForm.patchValue({portals:o});const i=[];C().forEach(t.disks,_=>{const r=`${_.pool}/${_.image}`;i.push(r),this.imagesSettings[r]={backstore:_.backstore},this.imagesSettings[r][_.backstore]=_.controls,"lun"in _&&(this.imagesSettings[r].lun=_.lun),"wwn"in _&&(this.imagesSettings[r].wwn=_.wwn),this.onImageSelection({option:{name:r,selected:!0}})}),this.targetForm.patchValue({disks:i}),C().forEach(t.clients,_=>{const r=this.addInitiator();_.luns=C().map(_.luns,l=>`${l.pool}/${l.image}`),r.patchValue(_)}),t.groups.forEach((_,r)=>{const 
l=this.addGroup();_.disks=C().map(_.disks,d=>`${d.pool}/${d.image}`),l.patchValue(_),C().forEach(_.members,d=>{this.onGroupMemberSelection({option:new x.$(!0,d,"")},r)})})}hasAdvancedSettings(t){return Object.values(t).length>0}get portals(){return this.targetForm.get("portals")}onPortalSelection(){this.portals.setValue(this.portals.value)}removePortal(t,o){return this.portalsSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.portals.value.splice(t,1),this.portals.setValue(this.portals.value),!1}get disks(){return this.targetForm.get("disks")}removeImage(t,o){return this.imagesSelections.forEach(i=>{i.name===o&&(i.selected=!1)}),this.disks.value.splice(t,1),this.removeImageRefs(o),this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1}),!1}removeImageRefs(t){this.initiators.controls.forEach(o=>{const i=o.value.luns.filter(_=>_!==t);o.get("luns").setValue(i)}),this.groups.controls.forEach(o=>{const i=o.value.disks.filter(_=>_!==t);o.get("disks").setValue(i)}),C().forEach(this.imagesInitiatorSelections,(o,i)=>{this.imagesInitiatorSelections[i]=o.filter(_=>_.name!==t)}),C().forEach(this.groupDiskSelections,(o,i)=>{this.groupDiskSelections[i]=o.filter(_=>_.name!==t)})}getDefaultBackstore(t){let o=this.default_backstore;const i=this.getImageById(t);return this.validFeatures(i,this.default_backstore)||this.backstores.forEach(_=>{_!==this.default_backstore&&this.validFeatures(i,_)&&(o=_)}),o}isLunIdInUse(t,o){const i=this.disks.value.filter(_=>_!==o);return this.getLunIds(i).includes(t)}getLunIds(t){return C().map(t,o=>this.imagesSettings[o].lun)}nextLunId(t){const o=this.disks.value.filter(r=>r!==t),i=this.getLunIds(o);let _=0;for(;i.includes(_);)_++;return _}getWwns(t){return C().map(t,i=>this.imagesSettings[i].wwn).filter(i=>C().isString(i)&&""!==i)}onImageSelection(t){const o=t.option;if(o.selected){if(this.imagesSettings[o.name])this.isLunIdInUse(this.imagesSettings[o.name].lun,o.name)&&(this.imagesSettings[o.name].lun=this.nextLunId(o.name));else{const i=this.getDefaultBackstore(o.name);this.imagesSettings[o.name]={backstore:i,lun:this.nextLunId(o.name)},this.imagesSettings[o.name][i]={}}C().forEach(this.imagesInitiatorSelections,(i,_)=>{i.push(new x.$(!1,o.name,"")),this.imagesInitiatorSelections[_]=[...i]}),C().forEach(this.groupDiskSelections,(i,_)=>{i.push(new x.$(!1,o.name,"")),this.groupDiskSelections[_]=[...i]})}else this.removeImageRefs(o.name);this.targetForm.get("disks").updateValueAndValidity({emitEvent:!1})}get initiators(){return this.targetForm.get("initiators")}addInitiator(){const t=new Z.d({client_iqn:new a.NI("",{validators:[a.kI.required,z.h.custom("notUnique",i=>{const _=this.initiators.controls.reduce(function(r,l){return r.concat(l.value.client_iqn)},[]);return _.indexOf(i)!==_.lastIndexOf(i)}),a.kI.pattern(this.IQN_REGEX)]}),auth:new Z.d({user:new a.NI(""),password:new a.NI(""),mutual_user:new a.NI(""),mutual_password:new a.NI("")}),luns:new a.NI([]),cdIsInGroup:new a.NI(!1)});this.setAuthValidator(t),this.initiators.push(t),C().forEach(this.groupMembersSelections,(i,_)=>{i.push(new x.$(!1,"","")),this.groupMembersSelections[_]=[...i]});const o=C().map(this.targetForm.getValue("disks"),i=>new x.$(!1,i,""));return 
this.imagesInitiatorSelections.push(o),t}setAuthValidator(t){z.h.validateIf(t.get("user"),()=>t.getValue("password")||t.getValue("mutual_user")||t.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[t.get("password"),t.get("mutual_user"),t.get("mutual_password")]),z.h.validateIf(t.get("password"),()=>t.getValue("user")||t.getValue("mutual_user")||t.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("mutual_user"),t.get("mutual_password")]),z.h.validateIf(t.get("mutual_user"),()=>t.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_password")]),z.h.validateIf(t.get("mutual_password"),()=>t.getValue("mutual_user"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[t.get("user"),t.get("password"),t.get("mutual_user")])}removeInitiator(t){const o=this.initiators.value[t];this.initiators.removeAt(t),C().forEach(this.groupMembersSelections,(i,_)=>{i.splice(t,1),this.groupMembersSelections[_]=[...i]}),this.groups.controls.forEach(i=>{const _=i.value.members.filter(r=>r!==o.client_iqn);i.get("members").setValue(_)}),this.imagesInitiatorSelections.splice(t,1)}updatedInitiatorSelector(){this.initiators.controls.forEach(t=>{t.get("client_iqn").updateValueAndValidity({emitEvent:!1})}),C().forEach(this.groupMembersSelections,(t,o)=>{C().forEach(t,(i,_)=>{const r=i.name;i.name=this.initiators.controls[_].value.client_iqn,this.groups.controls.forEach(l=>{const d=l.value.members,u=d.indexOf(r);-1!==u&&(d[u]=i.name),l.get("members").setValue(d)})}),this.groupMembersSelections[o]=[...this.groupMembersSelections[o]]})}removeInitiatorImage(t,o,i,_){const r=t.getValue("luns");return r.splice(o,1),t.patchValue({luns:r}),this.imagesInitiatorSelections[i].forEach(l=>{l.name===_&&(l.selected=!1)}),!1}get groups(){return this.targetForm.get("groups")}addGroup(){const t=new Z.d({group_id:new a.NI("",{validators:[a.kI.required]}),members:new a.NI([]),disks:new a.NI([])});this.groups.push(t);const o=C().map(this.targetForm.getValue("disks"),_=>new x.$(!1,_,""));this.groupDiskSelections.push(o);const i=C().map(this.initiators.value,_=>new x.$(!1,_.client_iqn,"",!_.cdIsInGroup));return this.groupMembersSelections.push(i),t}removeGroup(t){this.groups.removeAt(t),this.groupMembersSelections[t].filter(i=>i.selected).forEach(i=>{i.selected=!1,this.onGroupMemberSelection({option:i},t)}),this.groupMembersSelections.splice(t,1),this.groupDiskSelections.splice(t,1)}onGroupMemberSelection(t,o){const i=t.option;let _=[];i.selected||(_=this.groupDiskSelections[o].filter(l=>l.selected).map(l=>l.name)),this.initiators.controls.forEach((r,l)=>{r.value.client_iqn===i.name&&(r.patchValue({luns:_}),r.get("cdIsInGroup").setValue(i.selected),C().forEach(this.groupMembersSelections,d=>{d[l].enabled=!i.selected}),this.imagesInitiatorSelections[l].forEach(d=>{d.selected=_.includes(d.name)}))})}removeGroupInitiator(t,o,i){const _=t.getValue("members")[o];t.getValue("members").splice(o,1),this.onGroupMemberSelection({option:new x.$(!1,_,"")},i)}removeGroupDisk(t,o,i){const _=t.getValue("disks")[o];t.getValue("disks").splice(o,1),this.groupDiskSelections[i].forEach(r=>{r.name===_&&(r.selected=!1)}),this.groupDiskSelections[i]=[...this.groupDiskSelections[i]]}submit(){const 
t=C().cloneDeep(this.targetForm.value),o={target_iqn:this.targetForm.getValue("target_iqn"),target_controls:this.targetForm.getValue("target_controls"),acl_enabled:this.targetForm.getValue("acl_enabled"),portals:[],disks:[],clients:[],groups:[]};if(this.cephIscsiConfigVersion>10){const _=this.targetForm.get("auth");_.getValue("user")||_.get("user").setValue(""),_.getValue("password")||_.get("password").setValue(""),_.getValue("mutual_user")||_.get("mutual_user").setValue(""),_.getValue("mutual_password")||_.get("mutual_password").setValue("");const r=this.targetForm.getValue("acl_enabled");o.auth={user:r?"":_.getValue("user"),password:r?"":_.getValue("password"),mutual_user:r?"":_.getValue("mutual_user"),mutual_password:r?"":_.getValue("mutual_password")}}let i;t.disks.forEach(_=>{const r=_.split("/"),l=this.imagesSettings[_].backstore;o.disks.push({pool:r[0],image:r[1],backstore:l,controls:this.imagesSettings[_][l],lun:this.imagesSettings[_].lun,wwn:this.imagesSettings[_].wwn})}),t.portals.forEach(_=>{const r=_.indexOf(":");o.portals.push({host:_.substring(0,r),ip:_.substring(r+1)})}),o.acl_enabled&&(t.initiators.forEach(_=>{_.auth.user||(_.auth.user=""),_.auth.password||(_.auth.password=""),_.auth.mutual_user||(_.auth.mutual_user=""),_.auth.mutual_password||(_.auth.mutual_password=""),delete _.cdIsInGroup;const r=[];_.luns.forEach(l=>{const d=l.split("/");r.push({pool:d[0],image:d[1]})}),_.luns=r}),o.clients=t.initiators),o.acl_enabled&&(t.groups.forEach(_=>{const r=[];_.disks.forEach(l=>{const d=l.split("/");r.push({pool:d[0],image:d[1]})}),_.disks=r}),o.groups=t.groups),this.isEdit?(o.new_target_iqn=o.target_iqn,o.target_iqn=this.target_iqn,i=this.taskWrapper.wrapTaskAroundCall({task:new M.R("iscsi/target/edit",{target_iqn:o.target_iqn}),call:this.iscsiService.updateTarget(this.target_iqn,o)})):i=this.taskWrapper.wrapTaskAroundCall({task:new M.R("iscsi/target/create",{target_iqn:o.target_iqn}),call:this.iscsiService.createTarget(o)}),i.subscribe({error:()=>{this.targetForm.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/block/iscsi/targets"])})}targetSettingsModal(){const t={target_controls:this.targetForm.get("target_controls"),target_default_controls:this.target_default_controls,target_controls_limits:this.target_controls_limits};this.modalRef=this.modalService.show(Ro,t)}imageSettingsModal(t){const o={imagesSettings:this.imagesSettings,image:t,api_version:this.api_version,disk_default_controls:this.disk_default_controls,disk_controls_limits:this.disk_controls_limits,backstores:this.getValidBackstores(this.getImageById(t)),control:this.targetForm.get("disks")};this.modalRef=this.modalService.show(Co,o)}validFeatures(t,o){const i=t.features,_=this.required_rbd_features[o];return(i&_)===_&&0==(i&this.unsupported_rbd_features[o])}getImageById(t){return this.imagesAll.find(o=>t===`${o.pool_name}/${o.name}`)}getValidBackstores(t){return this.backstores.filter(o=>this.validFeatures(t,o))}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(X),e.Y36(de.Z),e.Y36(H),e.Y36(g.F0),e.Y36(g.gz),e.Y36(m.P),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let s,t,o,i,_,r,l,d,u,S,E,I,B,y,Q,J,ee,te,w,_e,ae,O,ue,me,ge,Te,fe,Ce,Se,G,ye,xe,Ze,we,He,ke,qe,Ke,Xe,Qe,ze,b,yt,xt,Zt,wt,Ht,kt,qt,Kt,Xt,Qt,zt,Jt,Yt,Vt,Ut,jt,Wt,eo,to,oo;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Target IQN",o="Portals",i="Add portal",_="Images",r="Add image",l="ACL authentication",d="This field is 
required.",u="IQN has wrong pattern.",S="An IQN has the following notation 'iqn.$year-$month.$reversedAddress:$definedName'",E="For example: iqn.2016-06.org.dashboard:storage:disk.sn-a8675309",I="More information",B="This target has modified advanced settings.",y="At least " + "\ufffd0\ufffd" + " gateways are required.",Q="Backstore: " + "\ufffd0\ufffd" + ".\xA0",J="This image has modified settings.",ee="Duplicated LUN numbers.",te="Duplicated WWN.",w="User",_e="Password",ae="Mutual User",O="Mutual Password",ue="This field is required.",me="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",ge="This field is required.",Te="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",fe="This field is required.",Ce="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Se="This field is required.",G="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",ye="Initiators",xe="Add initiator",Ze="Initiator",we="Client IQN",He="User",ke="Password",qe="Mutual User",Ke="Mutual Password",Xe="Images",Qe="Initiator IQN needs to be unique.",ze="This field is required.",b="IQN has wrong pattern.",yt="This field is required.",xt="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",Zt="This field is required.",wt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Ht="This field is required.",kt="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",qt="This field is required.",Kt="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",Xt="Initiator belongs to a group. 
Images will be configure in the group.",Qt="Add image",zt="No items added.",Jt="Groups",Yt="Add group",Vt="Group",Ut="Name",jt="Initiators",Wt="Add initiator",eo="Images",to="Add image",oo="No items added.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","targetForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],s,[1,"card-body"],[1,"form-group","row"],["for","target_iqn",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],[1,"input-group"],["type","text","id","target_iqn","name","target_iqn","formControlName","target_iqn","cdTrim","",1,"form-control"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],["class","invalid-feedback",4,"ngIf"],["class","form-text text-muted",4,"ngIf"],["for","portals",1,"cd-col-form-label","required"],o,[4,"ngFor","ngForOf"],[1,"row"],[1,"col-md-12"],["elemClass","btn btn-light float-end",3,"data","options","messages","selection"],[3,"ngClass"],i,["type","hidden","id","portals","name","portals","formControlName","portals",1,"form-control"],["for","disks",1,"cd-col-form-label"],_,["type","hidden","id","disks","name","disks","formControlName","disks",1,"form-control"],r,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","formControlName","acl_enabled","name","acl_enabled","id","acl_enabled",1,"custom-control-input"],["for","acl_enabled",1,"custom-control-label"],l,["formGroupName","auth",4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,u,S,E,["target","_blank","href","https://en.wikipedia.org/wiki/ISCSI#Addressing"],I,[1,"form-text","text-muted"],B,[1,"input-group","cd-mb"],["type","text","disabled","",1,"cd-form-control",3,"value"],["type","button",1,"btn","btn-light",3,"click"],y,["class","input-group-text",4,"ngIf"],[4,"ngIf"],[1,"input-group-text"],Q,J,ee,te,["formGroupName","auth"],["for","target_user",1,"cd-col-form-label"],w,["type","text","autocomplete","off","id","target_user","name","target_user","formControlName","user",1,"form-control"],["for","target_password",1,"cd-col-form-label"],_e,["type","password","autocomplete","new-password","id","target_password","name","target_password","formControlName","password",1,"form-control"],["type","button","cdPasswordButton","target_password",1,"btn","btn-light"],["source","target_password"],["for","target_mutual_user",1,"cd-col-form-label"],ae,["type","text","autocomplete","off","id","target_mutual_user","name","target_mutual_user","formControlName","mutual_user",1,"form-control"],["for","target_mutual_password",1,"cd-col-form-label"],O,["type","password","autocomplete","new-password","id","target_mutual_password","name","target_mutual_password","formControlName","mutual_password",1,"form-control"],["type","button","cdPasswordButton","target_mutual_password",1,"btn","btn-light"],["source","target_mutual_password"],ue,me,ge,Te,fe,Ce,Se,G,["for","initiators",1,"cd-col-form-label"],ye,["formArrayName","initiators",1,"cd-col-form-input"],["class","card 
mb-2",3,"formGroup",4,"ngFor","ngForOf"],[1,"btn","btn-light","float-end",3,"click"],xe,[1,"card","mb-2",3,"formGroup"],Ze,["type","button",1,"btn-close","float-end",3,"click"],["for","client_iqn",1,"cd-col-form-label","required"],we,["type","text","formControlName","client_iqn","cdTrim","",1,"form-control",3,"blur"],["for","user",1,"cd-col-form-label"],He,["formControlName","user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","password",1,"cd-col-form-label"],ke,["formControlName","password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["type","button",1,"btn","btn-light",3,"cdPasswordButton"],[3,"source"],["for","mutual_user",1,"cd-col-form-label"],qe,["formControlName","mutual_user","autocomplete","off","type","text",1,"form-control",3,"id"],["for","mutual_password",1,"cd-col-form-label"],Ke,["formControlName","mutual_password","autocomplete","new-password","type","password",1,"form-control",3,"id"],["for","luns",1,"cd-col-form-label"],Xe,["class","row",4,"ngIf"],Qe,ze,b,yt,xt,Zt,wt,Ht,kt,qt,Kt,Xt,["elemClass","btn btn-light float-end",3,"data","options","messages"],Qt,zt,Jt,["formArrayName","groups",1,"cd-col-form-input"],Yt,Vt,["for","group_id",1,"cd-col-form-label","required"],Ut,["type","text","formControlName","group_id",1,"form-control"],["for","members",1,"cd-col-form-label"],jt,Wt,eo,to,oo]},template:function(t,o){1&t&&e.YNc(0,un,65,40,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},directives:[gt.y,a._Y,a.JL,a.sg,P.V,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Eo,c.mk,c.O5,c.sg,Mo.H,a.Wl,a.x0,Tt.C,Ye.s,a.CE,j.p],pipes:[c.rS,tt.m,Je.V],styles:[".cd-mb[_ngcontent-%COMP%]{margin-bottom:10px}"]}),n})();var Ct=p(68136),pe=p(30982),W=p(83697),Le=p(99466),Re=p(68774),St=p(55657),ce=p(38047),ot=p(18001),ve=p(97161),oe=p(47640);function mn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function gn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function Tn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,32),e.qZA())}function fn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,33),e.qZA())}function Cn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,34),e.qZA())}function Sn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,35),e.qZA())}function Rn(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,36),e.qZA())}function En(n,s){1&n&&(e.TgZ(0,"span",29),e.SDv(1,37),e.qZA())}let Mn=(()=>{class n{constructor(t,o,i,_,r){this.authStorageService=t,this.activeModal=o,this.actionLabels=i,this.iscsiService=_,this.notificationService=r,this.USER_REGEX=/^[\w\.:@_-]{8,64}$/,this.PASSWORD_REGEX=/^[\w@\-_\/]{12,16}$/,this.permission=this.authStorageService.getPermissions().iscsi}ngOnInit(){this.hasPermission=this.permission.update,this.createForm(),this.iscsiService.getDiscovery().subscribe(t=>{this.discoveryForm.patchValue(t)})}createForm(){this.discoveryForm=new Z.d({user:new a.NI({value:"",disabled:!this.hasPermission}),password:new a.NI({value:"",disabled:!this.hasPermission}),mutual_user:new a.NI({value:"",disabled:!this.hasPermission}),mutual_password:new 
a.NI({value:"",disabled:!this.hasPermission})}),z.h.validateIf(this.discoveryForm.get("user"),()=>this.discoveryForm.getValue("password")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("password"),()=>this.discoveryForm.getValue("user")||this.discoveryForm.getValue("mutual_user")||this.discoveryForm.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("mutual_user"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("mutual_user"),()=>this.discoveryForm.getValue("mutual_password"),[a.kI.required],[a.kI.pattern(this.USER_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_password")]),z.h.validateIf(this.discoveryForm.get("mutual_password"),()=>this.discoveryForm.getValue("mutual_user"),[a.kI.required],[a.kI.pattern(this.PASSWORD_REGEX)],[this.discoveryForm.get("user"),this.discoveryForm.get("password"),this.discoveryForm.get("mutual_user")])}submitAction(){this.iscsiService.updateDiscovery(this.discoveryForm.value).subscribe(()=>{this.notificationService.show(ot.k.success,"Updated discovery authentication"),this.activeModal.close()},()=>{this.discoveryForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(L.Kz),e.Y36(D.p4),e.Y36(X),e.Y36(ve.g))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-discovery-modal"]],decls:44,vars:13,consts:function(){let s,t,o,i,_,r,l,d,u,S,E,I,B;return s="Discovery Authentication",t="User",o="Password",i="Mutual User",_="Mutual Password",r="This field is required.",l="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",d="This field is required.",u="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or '/'.",S="This field is required.",E="User names must have a length of 8 to 64 characters and can contain alphanumeric characters, '.', '@', '-', '_' or ':'.",I="This field is required.",B="Passwords must have a length of 12 to 16 characters and can contain alphanumeric characters, '@', '-', '_' or 
'/'.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","discoveryForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],t,[1,"cd-col-form-input"],["id","user","formControlName","user","type","text","autocomplete","off",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","password",1,"cd-col-form-label"],o,[1,"input-group"],["id","password","formControlName","password","type","password","autocomplete","new-password",1,"form-control"],["type","button","cdPasswordButton","password",1,"btn","btn-light"],["source","password"],["for","mutual_user",1,"cd-col-form-label"],i,["id","mutual_user","formControlName","mutual_user","type","text","autocomplete","off",1,"form-control"],["for","mutual_password",1,"cd-col-form-label"],_,["id","mutual_password","formControlName","mutual_password","type","password","autocomplete","new-password",1,"form-control"],["type","button","cdPasswordButton","mutual_password",1,"btn","btn-light"],["source","mutual_password"],[1,"modal-footer"],[3,"form","showSubmit","submitText","submitActionEvent"],[1,"invalid-feedback"],r,l,d,u,S,E,I,B]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"div",7)(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e._UZ(11,"input",11),e.YNc(12,mn,2,0,"span",12),e.YNc(13,gn,2,0,"span",12),e.qZA()(),e.TgZ(14,"div",7)(15,"label",13),e.SDv(16,14),e.qZA(),e.TgZ(17,"div",10)(18,"div",15),e._UZ(19,"input",16)(20,"button",17)(21,"cd-copy-2-clipboard-button",18),e.qZA(),e.YNc(22,Tn,2,0,"span",12),e.YNc(23,fn,2,0,"span",12),e.qZA()(),e.TgZ(24,"div",7)(25,"label",19),e.ynx(26),e.SDv(27,20),e.BQk(),e.qZA(),e.TgZ(28,"div",10),e._UZ(29,"input",21),e.YNc(30,Cn,2,0,"span",12),e.YNc(31,Sn,2,0,"span",12),e.qZA()(),e.TgZ(32,"div",7)(33,"label",22),e.SDv(34,23),e.qZA(),e.TgZ(35,"div",10)(36,"div",15),e._UZ(37,"input",24)(38,"button",25)(39,"cd-copy-2-clipboard-button",26),e.qZA(),e.YNc(40,Rn,2,0,"span",12),e.YNc(41,En,2,0,"span",12),e.qZA()()(),e.TgZ(42,"div",27)(43,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return o.submitAction()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.discoveryForm),e.xp6(8),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("user",i,"pattern")),e.xp6(9),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("password",i,"pattern")),e.xp6(7),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_user",i,"pattern")),e.xp6(9),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"required")),e.xp6(1),e.Q6J("ngIf",o.discoveryForm.showError("mutual_password",i,"pattern")),e.xp6(2),e.Q6J("form",o.discoveryForm)("showSubmit",o.hasPermission)("submitText",o.actionLabels.SUBMIT)}},directives:[f.z,a._Y,a.JL,a.sg,P.V,h.P,$.o,a.Fj,K.b,a.JJ,a.u,c.O5,Tt.C,Ye.s,j.p],styles:[""]}),n})();var On=p(86969);let Rt=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-tabs"]],decls:7,vars:0,consts:function(){let s,t;return 
s="Overview",t="Targets",[[1,"nav","nav-tabs"],[1,"nav-item"],["routerLink","/block/iscsi/overview","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link"],s,["routerLink","/block/iscsi/targets","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link"],t]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0)(1,"li",1)(2,"a",2),e.SDv(3,3),e.qZA()(),e.TgZ(4,"li",1)(5,"a",4),e.SDv(6,5),e.qZA()()())},directives:[g.yS,g.Od],styles:[""]}),n})();var nt=p(34501),hn=p(30490),Ee=p(94928),An=p(68962);const Pn=["highlightTpl"],In=["detailTable"],bn=["tree"],Nn=function(){return["logged_in"]},Fn=function(){return["logged_out"]},Dn=function(n,s){return{"badge-success":n,"badge-danger":s}};function Ln(n,s){if(1&n&&(e._UZ(0,"i"),e.TgZ(1,"span"),e._uU(2),e.qZA(),e._uU(3," \xa0 "),e.TgZ(4,"span",8),e._uU(5),e.qZA()),2&n){const t=s.$implicit;e.Tol(t.data.cdIcon),e.xp6(2),e.Oqu(t.data.name),e.xp6(2),e.Q6J("ngClass",e.WLB(7,Dn,e.DdM(5,Nn).includes(t.data.status),e.DdM(6,Fn).includes(t.data.status))),e.xp6(1),e.hij(" ",t.data.status," ")}}function vn(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"legend"),e._uU(2),e.qZA(),e._UZ(3,"cd-table",10,11),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.Oqu(t.title),e.xp6(1),e.Q6J("data",t.data)("columns",t.columns)("limit",0)}}function $n(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Bn(n,s){if(1&n&&(e.TgZ(0,"strong"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Gn(n,s){if(1&n&&(e.YNc(0,$n,2,1,"span",12),e.YNc(1,Bn,2,1,"strong",12)),2&n){const t=s.row;e.Q6J("ngIf",void 0===t.default||t.default===t.current),e.xp6(1),e.Q6J("ngIf",void 0!==t.default&&t.default!==t.current)}}let yn=(()=>{class n{constructor(t,o){this.iscsiBackstorePipe=t,this.booleanTextPipe=o,this.icons=T.P,this.metadata={},this.nodes=[],this.treeOptions={useVirtualScroll:!0,actionMapping:{mouse:{click:this.onNodeSelected.bind(this)}}}}set content(t){this.detailTable=t,t&&t.updateColumns()}ngOnInit(){this.columns=[{prop:"displayName",name:"Name",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"current",name:"Current",flexGrow:1,cellTemplate:this.highlightTpl},{prop:"default",name:"Default",flexGrow:1,cellTemplate:this.highlightTpl}]}ngOnChanges(){this.selection&&(this.selectedItem=this.selection,this.generateTree()),this.data=void 0}generateTree(){const t=C().cloneDeep(this.selectedItem.target_controls);this.cephIscsiConfigVersion>10&&C().extend(t,C().cloneDeep(this.selectedItem.auth)),this.metadata={root:t};const o={target:{expanded:C().join(this.selectedItem.cdExecuting?[T.P.large,T.P.spinner,T.P.spin]:[T.P.large,T.P.bullseye]," ")},initiators:{expanded:C().join([T.P.large,T.P.user]," "),leaf:C().join([T.P.user]," ")},groups:{expanded:C().join([T.P.large,T.P.users]," "),leaf:C().join([T.P.users]," ")},disks:{expanded:C().join([T.P.large,T.P.disk]," "),leaf:C().join([T.P.disk]," ")},portals:{expanded:C().join([T.P.large,T.P.server]," "),leaf:C().join([T.P.server]," ")}},i=[];C().forEach(this.selectedItem.disks,d=>{const u="disk_"+d.pool+"_"+d.image;this.metadata[u]={controls:d.controls,backstore:d.backstore},["wwn","lun"].forEach(S=>{S in d&&(this.metadata[u][S]=d[S])}),i.push({name:`${d.pool}/${d.image}`,cdId:u,cdIcon:o.disks.leaf})});const _=[];C().forEach(this.selectedItem.portals,d=>{_.push({name:`${d.host}:${d.ip}`,cdIcon:o.portals.leaf})});const r=[];C().forEach(this.selectedItem.clients,d=>{const u=C().cloneDeep(d.auth);d.info&&(C().extend(u,d.info),delete 
u.state,C().forEach(Object.keys(d.info.state),I=>{u[I.toLowerCase()]=d.info.state[I]})),this.metadata["client_"+d.client_iqn]=u;const S=[];d.luns.forEach(I=>{S.push({name:`${I.pool}/${I.image}`,cdId:"disk_"+I.pool+"_"+I.image,cdIcon:o.disks.leaf})});let E="";d.info&&(E=Object.keys(d.info.state).includes("LOGGED_IN")?"logged_in":"logged_out"),r.push({name:d.client_iqn,status:E,cdId:"client_"+d.client_iqn,children:S,cdIcon:o.initiators.leaf})});const l=[];C().forEach(this.selectedItem.groups,d=>{const u=[];d.disks.forEach(E=>{u.push({name:`${E.pool}/${E.image}`,cdId:"disk_"+E.pool+"_"+E.image,cdIcon:o.disks.leaf})});const S=[];d.members.forEach(E=>{S.push({name:E,cdId:"client_"+E})}),l.push({name:d.group_id,cdIcon:o.groups.leaf,children:[{name:"Disks",children:u,cdIcon:o.disks.expanded},{name:"Initiators",children:S,cdIcon:o.initiators.expanded}]})}),this.nodes=[{name:this.selectedItem.target_iqn,cdId:"root",isExpanded:!0,cdIcon:o.target.expanded,children:[{name:"Disks",isExpanded:!0,children:i,cdIcon:o.disks.expanded},{name:"Portals",isExpanded:!0,children:_,cdIcon:o.portals.expanded},{name:"Initiators",isExpanded:!0,children:r,cdIcon:o.initiators.expanded},{name:"Groups",isExpanded:!0,children:l,cdIcon:o.groups.expanded}]}]}format(t){return"boolean"==typeof t?this.booleanTextPipe.transform(t):t}onNodeSelected(t,o){var i,_,r,l;if(ne.iM.ACTIVATE(t,o,!0),o.data.cdId){this.title=o.data.name;const d=this.metadata[o.data.cdId]||{};"root"===o.data.cdId?(null===(i=this.detailTable)||void 0===i||i.toggleColumn({prop:"default",isHidden:!0}),this.data=C().map(this.settings.target_default_controls,(u,S)=>({displayName:S,default:u=this.format(u),current:C().isUndefined(d[S])?u:this.format(d[S])})),this.cephIscsiConfigVersion>10&&["user","password","mutual_user","mutual_password"].forEach(u=>{this.data.push({displayName:u,default:null,current:d[u]})})):o.data.cdId.toString().startsWith("disk_")?(null===(_=this.detailTable)||void 0===_||_.toggleColumn({prop:"default",isHidden:!0}),this.data=C().map(this.settings.disk_default_controls[d.backstore],(u,S)=>({displayName:S,default:u=this.format(u),current:C().isUndefined(d.controls[S])?u:this.format(d.controls[S])})),this.data.push({displayName:"backstore",default:this.iscsiBackstorePipe.transform(this.settings.default_backstore),current:this.iscsiBackstorePipe.transform(d.backstore)}),["wwn","lun"].forEach(u=>{u in d&&this.data.push({displayName:u,default:void 0,current:d[u]})})):(null===(r=this.detailTable)||void 0===r||r.toggleColumn({prop:"default",isHidden:!1}),this.data=C().map(d,(u,S)=>({displayName:S,default:void 0,current:this.format(u)})))}else this.data=void 0;null===(l=this.detailTable)||void 0===l||l.updateColumns()}onUpdateData(){this.tree.treeModel.expandAll()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(Je.V),e.Y36(An.T))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Pn,7),e.Gf(In,5),e.Gf(bn,5)),2&t){let i;e.iGM(i=e.CRH())&&(o.highlightTpl=i.first),e.iGM(i=e.CRH())&&(o.content=i.first),e.iGM(i=e.CRH())&&(o.tree=i.first)}},inputs:{selection:"selection",settings:"settings",cephIscsiConfigVersion:"cephIscsiConfigVersion"},features:[e.TTD],decls:11,vars:3,consts:function(){let s;return s="iSCSI Topology",[[1,"row"],[1,"col-6"],s,[3,"nodes","options","updateData"],["tree",""],["treeNodeTemplate",""],["class","col-6 
metadata",4,"ngIf"],["highlightTpl",""],[1,"badge",3,"ngClass"],[1,"col-6","metadata"],["columnMode","flex",3,"data","columns","limit"],["detailTable",""],[4,"ngIf"]]},template:function(t,o){1&t&&(e.TgZ(0,"div",0)(1,"div",1)(2,"legend"),e.SDv(3,2),e.qZA(),e.TgZ(4,"tree-root",3,4),e.NdJ("updateData",function(){return o.onUpdateData()}),e.YNc(6,Ln,6,10,"ng-template",null,5,e.W1O),e.qZA()(),e.YNc(8,vn,5,4,"div",6),e.qZA(),e.YNc(9,Gn,2,2,"ng-template",null,7,e.W1O)),2&t&&(e.xp6(4),e.Q6J("nodes",o.nodes)("options",o.treeOptions),e.xp6(4),e.Q6J("ngIf",o.data))},directives:[ne.qr,c.mk,c.O5,W.a],styles:[""]}),n})();function xn(n,s){if(1&n&&(e.ynx(0),e._UZ(1,"br"),e.TgZ(2,"span"),e.SDv(3,6),e.qZA(),e.TgZ(4,"pre"),e._uU(5),e.qZA(),e.BQk()),2&n){const t=e.oxw(2);e.xp6(5),e.Oqu(t.status)}}function Zn(n,s){if(1&n&&(e.TgZ(0,"cd-alert-panel",2),e.ynx(1),e.tHW(2,3),e._UZ(3,"cd-doc",4),e.N_p(),e.BQk(),e.YNc(4,xn,6,1,"ng-container",5),e.qZA()),2&n){const t=e.oxw();e.xp6(4),e.Q6J("ngIf",t.status)}}function wn(n,s){if(1&n&&e._UZ(0,"cd-iscsi-target-details",15),2&n){const t=e.oxw(2);e.Q6J("cephIscsiConfigVersion",t.cephIscsiConfigVersion)("selection",t.expandedRow)("settings",t.settings)}}const Hn=function(n){return[n]};function kn(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",7,8),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().getTargets()})("setExpandedRow",function(i){return e.CHM(t),e.oxw().setExpandedRow(i)})("updateSelection",function(i){return e.CHM(t),e.oxw().updateSelection(i)}),e.TgZ(2,"div",9),e._UZ(3,"cd-table-actions",10),e.TgZ(4,"button",11),e.NdJ("click",function(){return e.CHM(t),e.oxw().configureDiscoveryAuth()}),e._UZ(5,"i",12),e.ynx(6),e.SDv(7,13),e.BQk(),e.qZA()(),e.YNc(8,wn,1,3,"cd-iscsi-target-details",14),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.targets)("columns",t.columns)("hasDetails",!0)("autoReload",!1)("status",t.tableStatus),e.xp6(3),e.Q6J("permission",t.permission)("selection",t.selection)("tableActions",t.tableActions),e.xp6(2),e.Q6J("ngClass",e.VKq(10,Hn,t.icons.key)),e.xp6(3),e.Q6J("ngIf",t.expandedRow)}}let qn=(()=>{class n extends Ct.o{constructor(t,o,i,_,r,l,d,u,S){super(S),this.authStorageService=t,this.iscsiService=o,this.joinPipe=i,this.taskListService=_,this.notAvailablePipe=r,this.modalService=l,this.taskWrapper=d,this.actionLabels=u,this.ngZone=S,this.available=void 0,this.selection=new Re.r,this.targets=[],this.icons=T.P,this.builders={"iscsi/target/create":E=>({target_iqn:E.target_iqn})},this.permission=this.authStorageService.getPermissions().iscsi,this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>"/block/iscsi/targets/create",name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>`/block/iscsi/targets/edit/${this.selection.first().target_iqn}`,name:this.actionLabels.EDIT,disable:()=>this.getEditDisableDesc()},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteIscsiTargetModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Target",prop:"target_iqn",flexGrow:2,cellTransformation:Le.e.executing},{name:"Portals",prop:"cdPortals",pipe:this.joinPipe,flexGrow:2},{name:"Images",prop:"cdImages",pipe:this.joinPipe,flexGrow:2},{name:"# 
Sessions",prop:"info.num_sessions",pipe:this.notAvailablePipe,flexGrow:1}],this.iscsiService.status().subscribe(t=>{this.available=t.available,t.available||(this.status=t.message)})}getTargets(){this.available&&(this.setTableRefreshTimeout(),this.iscsiService.version().subscribe(t=>{this.cephIscsiConfigVersion=t.ceph_iscsi_config_version}),this.taskListService.init(()=>this.iscsiService.listTargets(),t=>this.prepareResponse(t),t=>this.targets=t,()=>this.onFetchError(),this.taskFilter,this.itemFilter,this.builders),this.iscsiService.settings().subscribe(t=>{this.settings=t}))}ngOnDestroy(){this.summaryDataSubscription&&this.summaryDataSubscription.unsubscribe()}getEditDisableDesc(){const t=this.selection.first();return t&&(null==t?void 0:t.cdExecuting)?t.cdExecuting:t&&C().isUndefined(null==t?void 0:t.info)?"Unavailable gateway(s)":!t}getDeleteDisableDesc(){var t;const o=this.selection.first();return(null==o?void 0:o.cdExecuting)?o.cdExecuting:o&&C().isUndefined(null==o?void 0:o.info)?"Unavailable gateway(s)":o&&(null===(t=null==o?void 0:o.info)||void 0===t?void 0:t.num_sessions)?"Target has active sessions":!o}prepareResponse(t){return t.forEach(o=>{o.cdPortals=o.portals.map(i=>`${i.host}:${i.ip}`),o.cdImages=o.disks.map(i=>`${i.pool}/${i.image}`)}),t}onFetchError(){this.table.reset()}itemFilter(t,o){return t.target_iqn===o.metadata.target_iqn}taskFilter(t){return["iscsi/target/create","iscsi/target/edit","iscsi/target/delete"].includes(t.name)}updateSelection(t){this.selection=t}deleteIscsiTargetModal(){const t=this.selection.first().target_iqn;this.modalRef=this.modalService.show(pe.M,{itemDescription:"iSCSI target",itemNames:[t],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new M.R("iscsi/target/delete",{target_iqn:t}),call:this.iscsiService.deleteTarget(t)})})}configureDiscoveryAuth(){this.modalService.show(Mn)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(X),e.Y36(On.A),e.Y36(ce.j),e.Y36(St.g),e.Y36(de.Z),e.Y36(m.P),e.Y36(D.p4),e.Y36(e.R0b))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi-target-list"]],viewQuery:function(t,o){if(1&t&&e.Gf(W.a,5),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first)}},features:[e._Bn([ce.j]),e.qOj],decls:3,vars:2,consts:function(){let s,t,o,i;return s="iSCSI Targets not available",t="Please consult the " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + " on how to configure and enable the iSCSI Targets management functionality.",o="Available information:",i="Discovery 
authentication",[["type","info","title",s,4,"ngIf"],["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection",4,"ngIf"],["type","info","title",s],t,["section","iscsi"],[4,"ngIf"],o,["columnMode","flex","identifier","target_iqn","forceIdentifier","true","selectionType","single",3,"data","columns","hasDetails","autoReload","status","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],i,["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings",4,"ngIf"],["cdTableDetail","",3,"cephIscsiConfigVersion","selection","settings"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.YNc(1,Zn,5,1,"cd-alert-panel",0),e.YNc(2,kn,9,12,"cd-table",1)),2&t&&(e.xp6(1),e.Q6J("ngIf",!1===o.available),e.xp6(1),e.Q6J("ngIf",!0===o.available))},directives:[Rt,c.O5,nt.G,hn.K,W.a,Ee.K,$.o,c.mk,yn],styles:[""]}),n})();var it=p(66369),Kn=p(76446),Xn=p(90068);const Qn=["iscsiSparklineTpl"],zn=["iscsiPerSecondTpl"],Jn=["iscsiRelativeDateTpl"];function Yn(n,s){if(1&n&&(e.TgZ(0,"span"),e._UZ(1,"cd-sparkline",9),e.qZA()),2&n){const t=e.oxw(),o=t.value,i=t.row;e.xp6(1),e.Q6J("data",o)("isBinary",i.cdIsBinary)}}function Vn(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function Un(n,s){if(1&n&&(e.YNc(0,Yn,2,2,"span",7),e.YNc(1,Vn,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function jn(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",t," /s ")}}function Wn(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function ei(n,s){if(1&n&&(e.YNc(0,jn,2,1,"span",7),e.YNc(1,Wn,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}function ti(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"notAvailable"),e.ALo(3,"relativeDate"),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.hij(" ",e.lcZ(2,1,e.lcZ(3,3,t))," ")}}function oi(n,s){1&n&&(e.TgZ(0,"span",10),e._uU(1," n/a "),e.qZA())}function ni(n,s){if(1&n&&(e.YNc(0,ti,4,5,"span",7),e.YNc(1,oi,2,0,"span",8)),2&n){const t=s.row;e.Q6J("ngIf","user:rbd"===t.backstore),e.xp6(1),e.Q6J("ngIf","user:rbd"!==t.backstore)}}let ii=(()=>{class n{constructor(t,o,i){this.iscsiService=t,this.dimlessPipe=o,this.iscsiBackstorePipe=i,this.gateways=[],this.images=[]}ngOnInit(){this.gatewaysColumns=[{name:"Name",prop:"name"},{name:"State",prop:"state",flexGrow:1,cellTransformation:Le.e.badge,customTemplateConfig:{map:{up:{class:"badge-success"},down:{class:"badge-danger"}}}},{name:"# Targets",prop:"num_targets"},{name:"# Sessions",prop:"num_sessions"}],this.imagesColumns=[{name:"Pool",prop:"pool"},{name:"Image",prop:"image"},{name:"Backstore",prop:"backstore",pipe:this.iscsiBackstorePipe},{name:"Read Bytes",prop:"stats_history.rd_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Write Bytes",prop:"stats_history.wr_bytes",cellTemplate:this.iscsiSparklineTpl},{name:"Read Ops",prop:"stats.rd",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"Write Ops",prop:"stats.wr",pipe:this.dimlessPipe,cellTemplate:this.iscsiPerSecondTpl},{name:"A/O 
Since",prop:"optimized_since",cellTemplate:this.iscsiRelativeDateTpl}]}refresh(){this.iscsiService.overview().subscribe(t=>{this.gateways=t.gateways,this.images=t.images,this.images.map(o=>(o.stats_history&&(o.stats_history.rd_bytes=o.stats_history.rd_bytes.map(i=>i[1]),o.stats_history.wr_bytes=o.stats_history.wr_bytes.map(i=>i[1])),o.cdIsBinary=!0,o))})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(X),e.Y36(it.n),e.Y36(Je.V))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-iscsi"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Qn,7),e.Gf(zn,7),e.Gf(Jn,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.iscsiSparklineTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiPerSecondTpl=i.first),e.iGM(i=e.CRH())&&(o.iscsiRelativeDateTpl=i.first)}},decls:15,vars:4,consts:function(){let s,t;return s="Gateways",t="Images",[s,[3,"data","columns","fetchData"],t,[3,"data","columns"],["iscsiSparklineTpl",""],["iscsiPerSecondTpl",""],["iscsiRelativeDateTpl",""],[4,"ngIf"],["class","text-muted",4,"ngIf"],[3,"data","isBinary"],[1,"text-muted"]]},template:function(t,o){1&t&&(e._UZ(0,"cd-iscsi-tabs"),e.TgZ(1,"legend"),e.SDv(2,0),e.qZA(),e.TgZ(3,"div")(4,"cd-table",1),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA()(),e.TgZ(5,"legend"),e.SDv(6,2),e.qZA(),e.TgZ(7,"div"),e._UZ(8,"cd-table",3),e.qZA(),e.YNc(9,Un,2,2,"ng-template",null,4,e.W1O),e.YNc(11,ei,2,2,"ng-template",null,5,e.W1O),e.YNc(13,ni,2,2,"ng-template",null,6,e.W1O)),2&t&&(e.xp6(4),e.Q6J("data",o.gateways)("columns",o.gatewaysColumns),e.xp6(4),e.Q6J("data",o.images)("columns",o.imagesColumns))},directives:[Rt,W.a,c.O5,Kn.l],pipes:[St.g,Xn.h],styles:[""]}),n})(),si=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[c.ez,Ae.m,L.Oz,g.Bz,a.u5,a.UX,L.ZQ,L.HK]]}),n})();var _i=p(13464),ai=p(26215),ri=p(45435),Et=p(36848);let q=class{constructor(s,t){this.http=s,this.timerService=t,this.REFRESH_INTERVAL=3e4,this.summaryDataSource=new ai.X(null),this.summaryData$=this.summaryDataSource.asObservable()}startPolling(){return this.timerService.get(()=>this.retrieveSummaryObservable(),this.REFRESH_INTERVAL).subscribe(this.retrieveSummaryObserver())}refresh(){return this.retrieveSummaryObservable().subscribe(this.retrieveSummaryObserver())}retrieveSummaryObservable(){return this.http.get("api/block/mirroring/summary")}retrieveSummaryObserver(){return s=>{this.summaryDataSource.next(s)}}subscribeSummary(s,t){return this.summaryData$.pipe((0,ri.h)(o=>!!o)).subscribe(s,t)}getPool(s){return this.http.get(`api/block/mirroring/pool/${s}`)}updatePool(s,t){return this.http.put(`api/block/mirroring/pool/${s}`,t,{observe:"response"})}getSiteName(){return this.http.get("api/block/mirroring/site_name")}setSiteName(s){return this.http.put("api/block/mirroring/site_name",{site_name:s},{observe:"response"})}createBootstrapToken(s){return this.http.post(`api/block/mirroring/pool/${s}/bootstrap/token`,{})}importBootstrapToken(s,t,o){return this.http.post(`api/block/mirroring/pool/${s}/bootstrap/peer`,{direction:t,token:o},{observe:"response"})}getPeer(s,t){return this.http.get(`api/block/mirroring/pool/${s}/peer/${t}`)}getPeerForPool(s){return this.http.get(`api/block/mirroring/pool/${s}/peer`)}addPeer(s,t){return this.http.post(`api/block/mirroring/pool/${s}/peer`,t,{observe:"response"})}updatePeer(s,t,o){return this.http.put(`api/block/mirroring/pool/${s}/peer/${t}`,o,{observe:"response"})}deletePeer(s,t){return this.http.delete(`api/block/mirroring/pool/${s}/peer/${t}`,{observe:"response"})}};q.\u0275fac=function(s){return 
new(s||q)(e.LFG(ie.eN),e.LFG(Et.f))},q.\u0275prov=e.Yz7({token:q,factory:q.\u0275fac,providedIn:"root"}),(0,F.gn)([(0,F.fM)(0,Y.G),(0,F.w6)("design:type",Function),(0,F.w6)("design:paramtypes",[String]),(0,F.w6)("design:returntype",void 0)],q.prototype,"setSiteName",null),(0,F.gn)([(0,F.fM)(1,Y.G),(0,F.fM)(2,Y.G),(0,F.w6)("design:type",Function),(0,F.w6)("design:paramtypes",[String,String,String]),(0,F.w6)("design:returntype",void 0)],q.prototype,"importBootstrapToken",null),q=(0,F.gn)([Y.o,(0,F.w6)("design:paramtypes",[ie.eN,Et.f])],q);var st=p(6481),li=p(68307),Mt=p(12627),Me=p(82945),ci=p(39749),di=p(13472);function pi(n,s){1&n&&(e.TgZ(0,"span",25),e.SDv(1,26),e.qZA())}function ui(n,s){if(1&n&&(e.TgZ(0,"div",27),e._UZ(1,"input",28),e.TgZ(2,"label",29),e._uU(3),e.qZA()()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function mi(n,s){1&n&&(e.TgZ(0,"span",25),e.SDv(1,30),e.qZA())}let gi=(()=>{class n{constructor(t,o,i){this.activeModal=t,this.rbdMirroringService=o,this.taskWrapper=i,this.pools=[],this.createForm()}createForm(){this.createBootstrapForm=new Z.d({siteName:new a.NI("",{validators:[a.kI.required]}),pools:new a.cw({},{validators:[this.validatePools()]}),token:new a.NI("",{})})}ngOnInit(){this.createBootstrapForm.get("siteName").setValue(this.siteName),this.rbdMirroringService.getSiteName().subscribe(t=>{this.createBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((_,r)=>(_.push({name:r.name,mirror_mode:r.mirror_mode}),_),[]);const i=this.createBootstrapForm.get("pools");C().each(this.pools,_=>{const r=_.name,l="disabled"===_.mirror_mode,d=i.controls[r];d?l&&d.disabled?d.enable():!l&&d.enabled&&(d.disable(),d.setValue(!0)):i.addControl(r,new a.NI({value:!l,disabled:!l}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return C().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}generate(){this.createBootstrapForm.get("token").setValue("");let t="";const o=[],i=this.createBootstrapForm.get("pools");C().each(i.controls,(u,S)=>{!0===u.value&&(t=S,u.disabled||o.push(S))});const _={mirror_mode:"image"},r=(0,st.z)(this.rbdMirroringService.setSiteName(this.createBootstrapForm.getValue("siteName")),(0,le.D)(o.map(u=>this.rbdMirroringService.updatePool(u,_))),this.rbdMirroringService.createBootstrapToken(t).pipe((0,li.b)(u=>this.createBootstrapForm.get("token").setValue(u.token)))).pipe((0,Mt.Z)()),l=()=>{this.rbdMirroringService.refresh(),this.createBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/mirroring/bootstrap/create",{}),call:r}).subscribe({error:l,complete:l})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(q),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-create-modal"]],decls:32,vars:6,consts:function(){let s,t,o,i,_,r,l,d,u,S,E;return s="Create Bootstrap Token",t="To create a bootstrap token which can be imported by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, and click\xA0 " + "\ufffd#10\ufffd" + "Generate" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",_="Pools",r="Generate",l="Token",d="Generated token...",u="Close",S="This field is required.",E="At least one pool is 
required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","createBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],_,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],[1,"mb-4","float-end",3,"form","submitAction"],r,["for","token",1,"col-form-label"],l,["placeholder",d,"id","token","formControlName","token","readonly","",1,"form-control","resize-vertical"],["source","token",1,"float-end"],[1,"modal-footer"],["name",u,3,"backAction"],[1,"invalid-feedback"],S,[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],E]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,pi,2,0,"span",12),e.qZA(),e.TgZ(16,"div",13)(17,"label",14),e.SDv(18,15),e.qZA(),e.YNc(19,ui,4,5,"div",16),e.YNc(20,mi,2,0,"span",12),e.qZA(),e.TgZ(21,"cd-submit-button",17),e.NdJ("submitAction",function(){return o.generate()}),e.SDv(22,18),e.qZA(),e.TgZ(23,"div",8)(24,"label",19)(25,"span"),e.SDv(26,20),e.qZA()(),e.TgZ(27,"textarea",21),e._uU(28," "),e.qZA()(),e._UZ(29,"cd-copy-2-clipboard-button",22),e.qZA(),e.TgZ(30,"div",23)(31,"cd-back-button",24),e.NdJ("backAction",function(){return o.activeModal.close()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.createBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.createBootstrapForm.showError("siteName",i,"required")),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.createBootstrapForm.showError("pools",i,"requirePool")),e.xp6(1),e.Q6J("form",o.createBootstrapForm)}},directives:[f.z,a._Y,a.JL,P.V,a.sg,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Me.U,c.O5,a.x0,c.sg,a.Wl,ci.w,Ye.s,di.W],styles:[".form-group.ng-invalid[_ngcontent-%COMP%] .invalid-feedback[_ngcontent-%COMP%]{display:block}"]}),n})();function Ti(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,27),e.qZA())}function fi(n,s){if(1&n&&(e.TgZ(0,"option",28),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.key),e.xp6(1),e.Oqu(t.desc)}}function Ci(n,s){if(1&n&&(e.TgZ(0,"div",29),e._UZ(1,"input",30),e.TgZ(2,"label",31),e._uU(3),e.qZA()()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.name),e.s9C("name",t.name),e.s9C("formControlName",t.name),e.xp6(1),e.s9C("for",t.name),e.xp6(1),e.Oqu(t.name)}}function Si(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,32),e.qZA())}function Ri(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,33),e.qZA())}function Ei(n,s){1&n&&(e.TgZ(0,"span",26),e.SDv(1,34),e.qZA())}let Mi=(()=>{class n{constructor(t,o,i,_){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.pools=[],this.directions=[{key:"rx-tx",desc:"Bidirectional"},{key:"rx",desc:"Unidirectional (receive-only)"}],this.createForm()}createForm(){this.importBootstrapForm=new Z.d({siteName:new a.NI("",{validators:[a.kI.required]}),direction:new a.NI("rx-tx",{}),pools:new a.cw({},{validators:[this.validatePools()]}),token:new 
a.NI("",{validators:[a.kI.required,this.validateToken()]})})}ngOnInit(){this.rbdMirroringService.getSiteName().subscribe(t=>{this.importBootstrapForm.get("siteName").setValue(t.site_name)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.pools=t.content_data.pools.reduce((_,r)=>(_.push({name:r.name,mirror_mode:r.mirror_mode}),_),[]);const i=this.importBootstrapForm.get("pools");C().each(this.pools,_=>{const r=_.name,l="disabled"===_.mirror_mode,d=i.controls[r];d?l&&d.disabled?d.enable():!l&&d.enabled&&(d.disable(),d.setValue(!0)):i.addControl(r,new a.NI({value:!l,disabled:!l}))})})}ngOnDestroy(){this.subs&&this.subs.unsubscribe()}validatePools(){return t=>{let o=0;return C().each(t.controls,i=>{!0===i.value&&++o}),o>0?null:{requirePool:!0}}}validateToken(){return t=>{try{if(JSON.parse(atob(t.value)))return null}catch(o){}return{invalidToken:!0}}}import(){const t=[],o=[],i=this.importBootstrapForm.get("pools");C().each(i.controls,(u,S)=>{!0===u.value&&(t.push(S),u.disabled||o.push(S))});const _={mirror_mode:"image"};let r=(0,st.z)(this.rbdMirroringService.setSiteName(this.importBootstrapForm.getValue("siteName")),(0,le.D)(o.map(u=>this.rbdMirroringService.updatePool(u,_))));r=t.reduce((u,S)=>(0,st.z)(u,this.rbdMirroringService.importBootstrapToken(S,this.importBootstrapForm.getValue("direction"),this.importBootstrapForm.getValue("token"))),r).pipe((0,Mt.Z)());const l=()=>{this.rbdMirroringService.refresh(),this.importBootstrapForm.setErrors({cdSubmitButton:!0})};this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/mirroring/bootstrap/import",{}),call:r}).subscribe({error:l,complete:()=>{l(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(D.p4),e.Y36(q),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-bootstrap-import-modal"]],decls:36,vars:10,consts:function(){let s,t,o,i,_,r,l,d,u,S,E,I;return s="Import Bootstrap Token",t="To import a bootstrap token which was created by a peer site cluster, provide the local site's name, select which pools will have mirroring enabled, provide the generated token, and click\xA0" + "\ufffd#10\ufffd" + "Import" + "\ufffd/#10\ufffd" + ".",o="Site Name",i="Name...",_="Direction",r="Pools",l="Token",d="Generated token...",u="This field is required.",S="At least one pool is required.",E="This field is required.",I="The token is invalid.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","importBootstrapForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","siteName",1,"col-form-label","required"],o,["type","text","placeholder",i,"id","siteName","name","siteName","formControlName","siteName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","direction",1,"col-form-label"],_,["id","direction","name","direction","formControlName","direction",1,"form-control"],[3,"value",4,"ngFor","ngForOf"],["formGroupName","pools",1,"form-group"],["for","pools",1,"col-form-label","required"],r,["class","custom-control 
custom-checkbox",4,"ngFor","ngForOf"],["for","token",1,"col-form-label","required"],l,["placeholder",d,"id","token","formControlName","token",1,"form-control","resize-vertical"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],u,[3,"value"],[1,"custom-control","custom-checkbox"],["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],S,E,I]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,Ti,2,0,"span",12),e.qZA(),e.TgZ(16,"div",8)(17,"label",13)(18,"span"),e.SDv(19,14),e.qZA()(),e.TgZ(20,"select",15),e.YNc(21,fi,2,2,"option",16),e.qZA()(),e.TgZ(22,"div",17)(23,"label",18),e.SDv(24,19),e.qZA(),e.YNc(25,Ci,4,5,"div",20),e.YNc(26,Si,2,0,"span",12),e.qZA(),e.TgZ(27,"div",8)(28,"label",21),e.SDv(29,22),e.qZA(),e.TgZ(30,"textarea",23),e._uU(31," "),e.qZA(),e.YNc(32,Ri,2,0,"span",12),e.YNc(33,Ei,2,0,"span",12),e.qZA()(),e.TgZ(34,"div",24)(35,"cd-form-button-panel",25),e.NdJ("submitActionEvent",function(){return o.import()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.importBootstrapForm),e.xp6(11),e.Q6J("ngIf",o.importBootstrapForm.showError("siteName",i,"required")),e.xp6(6),e.Q6J("ngForOf",o.directions),e.xp6(4),e.Q6J("ngForOf",o.pools),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("pools",i,"requirePool")),e.xp6(6),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"required")),e.xp6(1),e.Q6J("ngIf",o.importBootstrapForm.showError("token",i,"invalidToken")),e.xp6(2),e.Q6J("form",o.importBootstrapForm)("submitText",o.actionLabels.SUBMIT)}},directives:[f.z,a._Y,a.JL,P.V,a.sg,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Me.U,c.O5,a.EJ,c.sg,a.YN,a.Kr,a.x0,a.Wl,j.p],styles:[""]}),n})();var se=p(69158),Oi=p(58111);let _t=(()=>{class n{transform(t){return"warning"===t?"badge badge-warning":"error"===t?"badge badge-danger":"success"===t?"badge badge-success":"badge badge-info"}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275pipe=e.Yjl({name:"mirrorHealthColor",type:n,pure:!0}),n})();const hi=["healthTmpl"];function Ai(n,s){if(1&n&&(e.TgZ(0,"span",2),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.health_color)),e.xp6(2),e.Oqu(o)}}let Pi=(()=>{class n{constructor(t,o){this.rbdMirroringService=t,this.cephShortVersionPipe=o,this.tableStatus=new se.E}ngOnInit(){this.columns=[{prop:"instance_id",name:"Instance",flexGrow:2},{prop:"id",name:"ID",flexGrow:2},{prop:"server_hostname",name:"Hostname",flexGrow:2},{prop:"version",name:"Version",pipe:this.cephShortVersionPipe,flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.daemons,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(q),e.Y36(Oi.F))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-daemons"]],viewQuery:function(t,o){if(1&t&&e.Gf(hi,7),2&t){let 
i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first)}},decls:3,vars:4,consts:[["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],["healthTmpl",""],[3,"ngClass"]],template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()}),e.qZA(),e.YNc(1,Ai,3,4,"ng-template",null,1,e.W1O)),2&t&&e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus)},directives:[W.a,c.mk],pipes:[_t],styles:[""]}),n})();var Ot=p(70882);class Ii{}function bi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function Ni(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,26),e.qZA())}function Fi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,27),e.qZA())}function Di(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,28),e.qZA())}function Li(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,29),e.qZA())}function vi(n,s){1&n&&(e.TgZ(0,"span",24),e.SDv(1,30),e.qZA())}let $i=(()=>{class n{constructor(t,o,i,_){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.bsConfig={containerClass:"theme-default"},this.createForm()}createForm(){this.editPeerForm=new Z.d({clusterName:new a.NI("",{validators:[a.kI.required,this.validateClusterName]}),clientID:new a.NI("",{validators:[a.kI.required,this.validateClientID]}),monAddr:new a.NI("",{validators:[this.validateMonAddr]}),key:new a.NI("",{validators:[this.validateKey]})})}ngOnInit(){this.pattern=`${this.poolName}/${this.peerUUID}`,"edit"===this.mode&&this.rbdMirroringService.getPeer(this.poolName,this.peerUUID).subscribe(t=>{this.setResponse(t)})}validateClusterName(t){if(!t.value.match(/^[\w\-_]*$/))return{invalidClusterName:{value:t.value}}}validateClientID(t){if(!t.value.match(/^(?!client\.)[\w\-_.]*$/))return{invalidClientID:{value:t.value}}}validateMonAddr(t){if(!t.value.match(/^[,; ]*([\w.\-_\[\]]+(:[\d]+)?[,; ]*)*$/))return{invalidMonAddr:{value:t.value}}}validateKey(t){try{if(""===t.value||atob(t.value))return null}catch(o){}return{invalidKey:{value:t.value}}}setResponse(t){this.response=t,this.editPeerForm.get("clusterName").setValue(t.cluster_name),this.editPeerForm.get("clientID").setValue(t.client_id),this.editPeerForm.get("monAddr").setValue(t.mon_host),this.editPeerForm.get("key").setValue(t.key)}update(){const t=new Ii;let o;t.cluster_name=this.editPeerForm.getValue("clusterName"),t.client_id=this.editPeerForm.getValue("clientID"),t.mon_host=this.editPeerForm.getValue("monAddr"),t.key=this.editPeerForm.getValue("key"),o=this.taskWrapper.wrapTaskAroundCall("edit"===this.mode?{task:new M.R("rbd/mirroring/peer/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePeer(this.poolName,this.peerUUID,t)}:{task:new M.R("rbd/mirroring/peer/add",{pool_name:this.poolName}),call:this.rbdMirroringService.addPeer(this.poolName,t)}),o.subscribe({error:()=>this.editPeerForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(D.p4),e.Y36(q),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-peer-modal"]],decls:38,vars:13,consts:function(){let s,t,o,i,_,r,l,d,u,S,E,I,B,y,Q,J,ee,te;return s="{VAR_SELECT, select, edit {Edit} other {Add}}",s=e.Zx4(s,{VAR_SELECT:"\ufffd0\ufffd"}),t="" + s + " pool mirror peer",o="{VAR_SELECT, select, edit {Edit} other {Add}}",o=e.Zx4(o,{VAR_SELECT:"\ufffd0\ufffd"}),i="" + o + " the pool mirror peer attributes for pool " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd1\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " and click " + 
"[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Submit" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",i=e.Zx4(i),_="Cluster Name",r="Name...",l="CephX ID",d="CephX ID...",u="Monitor Addresses",S="Comma-delimited addresses...",E="CephX Key",I="Base64-encoded key...",B="This field is required.",y="The cluster name is not valid.",Q="This field is required.",J="The CephX ID is not valid.",ee="The monitory address is not valid.",te="CephX key must be base64 encoded.",[[3,"modalRef"],[1,"modal-title"],t,[1,"modal-content"],["name","editPeerForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],i,[1,"form-group"],["for","clusterName",1,"col-form-label","required"],_,["type","text","placeholder",r,"id","clusterName","name","clusterName","formControlName","clusterName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","clientID",1,"col-form-label","required"],l,["type","text","placeholder",d,"id","clientID","name","clientID","formControlName","clientID",1,"form-control"],["for","monAddr",1,"col-form-label"],u,["type","text","placeholder",S,"id","monAddr","name","monAddr","formControlName","monAddr",1,"form-control"],["for","key",1,"col-form-label"],E,["type","text","placeholder",I,"id","key","name","key","formControlName","key",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],B,y,Q,J,ee,te]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0)(1,"span",1),e.SDv(2,2),e.qZA(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p")(8,"span"),e.tHW(9,7),e._UZ(10,"kbd")(11,"kbd"),e.N_p(),e.qZA()(),e.TgZ(12,"div",8)(13,"label",9),e.SDv(14,10),e.qZA(),e._UZ(15,"input",11),e.YNc(16,bi,2,0,"span",12),e.YNc(17,Ni,2,0,"span",12),e.qZA(),e.TgZ(18,"div",8)(19,"label",13),e.SDv(20,14),e.qZA(),e._UZ(21,"input",15),e.YNc(22,Fi,2,0,"span",12),e.YNc(23,Di,2,0,"span",12),e.qZA(),e.TgZ(24,"div",8)(25,"label",16)(26,"span"),e.SDv(27,17),e.qZA()(),e._UZ(28,"input",18),e.YNc(29,Li,2,0,"span",12),e.qZA(),e.TgZ(30,"div",8)(31,"label",19)(32,"span"),e.SDv(33,20),e.qZA()(),e._UZ(34,"input",21),e.YNc(35,vi,2,0,"span",12),e.qZA()(),e.TgZ(36,"div",22)(37,"cd-form-button-panel",23),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(2),e.pQV(o.mode),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.editPeerForm),e.xp6(7),e.pQV(o.mode)(o.poolName),e.QtT(9),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clusterName",i,"invalidClusterName")),e.xp6(5),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"required")),e.xp6(1),e.Q6J("ngIf",o.editPeerForm.showError("clientID",i,"invalidClientID")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("monAddr",i,"invalidMonAddr")),e.xp6(6),e.Q6J("ngIf",o.editPeerForm.showError("key",i,"invalidKey")),e.xp6(2),e.Q6J("form",o.editPeerForm)("submitText",o.actionLabels.SUBMIT)}},directives:[f.z,a._Y,a.JL,P.V,a.sg,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Me.U,c.O5,j.p],styles:[""]}),n})();const Bi=["healthTmpl"],Gi=["localTmpl"],yi=["remoteTmpl"];function xi(n,s){if(1&n&&(e.TgZ(0,"span",6),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.health_color)),e.xp6(2),e.Oqu(o)}}function Zi(n,s){1&n&&(e.TgZ(0,"span",7),e.SDv(1,8),e.qZA())}function wi(n,s){1&n&&(e.TgZ(0,"span",9),e.SDv(1,10),e.qZA())}let ki=(()=>{class 
n{constructor(t,o,i,_,r){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=_,this.router=r,this.selection=new Re.r,this.tableStatus=new se.E,this.data=[],this.permission=this.authStorageService.getPermissions().rbdMirroring;const l={permission:"update",icon:T.P.edit,click:()=>this.editModeModal(),name:"Edit Mode",canBePrimary:()=>!0},d={permission:"create",icon:T.P.add,name:"Add Peer",click:()=>this.editPeersModal("add"),disable:()=>!this.selection.first()||"disabled"===this.selection.first().mirror_mode,visible:()=>!this.getPeerUUID(),canBePrimary:()=>!1},u={permission:"update",icon:T.P.exchange,name:"Edit Peer",click:()=>this.editPeersModal("edit"),visible:()=>!!this.getPeerUUID()},S={permission:"delete",icon:T.P.destroy,name:"Delete Peer",click:()=>this.deletePeersModal(),visible:()=>!!this.getPeerUUID()};this.tableActions=[l,d,u,S]}ngOnInit(){this.columns=[{prop:"name",name:"Name",flexGrow:2},{prop:"mirror_mode",name:"Mode",flexGrow:2},{prop:"leader_id",name:"Leader",flexGrow:2},{prop:"image_local_count",name:"# Local",headerTemplate:this.localTmpl,flexGrow:2},{prop:"image_remote_count",name:"# Remote",headerTemplate:this.remoteTmpl,flexGrow:2},{prop:"health",name:"Health",cellTemplate:this.healthTmpl,flexGrow:1}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.data=t.content_data.pools,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}editModeModal(){this.router.navigate(["/block/mirroring",{outlets:{modal:[D.MQ.EDIT,this.selection.first().name]}}])}editPeersModal(t){const o={poolName:this.selection.first().name,mode:t};"edit"===t&&(o.peerUUID=this.getPeerUUID()),this.modalRef=this.modalService.show($i,o)}deletePeersModal(){const t=this.selection.first().name,o=this.getPeerUUID();this.modalRef=this.modalService.show(pe.M,{itemDescription:"mirror peer",itemNames:[`${t} (${o})`],submitActionObservable:()=>new Ot.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/mirroring/peer/delete",{pool_name:t}),call:this.rbdMirroringService.deletePeer(t,o)}).subscribe({error:_=>i.error(_),complete:()=>{this.rbdMirroringService.refresh(),i.complete()}})})})}getPeerUUID(){const t=this.selection.first(),o=this.data.find(i=>t&&t.name===i.name);if(o&&o.peer_uuids)return o.peer_uuids[0]}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(de.Z),e.Y36(m.P),e.Y36(g.F0))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-pools"]],viewQuery:function(t,o){if(1&t&&(e.Gf(Bi,7),e.Gf(Gi,7),e.Gf(yi,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.healthTmpl=i.first),e.iGM(i=e.CRH())&&(o.localTmpl=i.first),e.iGM(i=e.CRH())&&(o.remoteTmpl=i.first)}},decls:9,vars:7,consts:function(){let s,t,o,i;return s="Local image count",t="# Local",o="Remote image count",i="# Remote",[["columnMode","flex","identifier","name","forceIdentifier","true","selectionType","single",3,"data","columns","autoReload","status","fetchData","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["healthTmpl",""],["localTmpl",""],["remoteTmpl",""],["name","modal"],[3,"ngClass"],["ngbTooltip",s],t,["ngbTooltip",o],i]},template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(_){return 
o.updateSelection(_)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,xi,3,4,"ng-template",null,2,e.W1O),e.YNc(4,Zi,2,0,"ng-template",null,3,e.W1O),e.YNc(6,wi,2,0,"ng-template",null,4,e.W1O),e._UZ(8,"router-outlet",5)),2&t&&(e.Q6J("data",o.data)("columns",o.columns)("autoReload",-1)("status",o.tableStatus),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[W.a,Ee.K,c.mk,L._L,g.lC],pipes:[_t],styles:[""]}),n})();var ht=p(59376);const qi=["stateTmpl"],Ki=["syncTmpl"],Xi=["progressTmpl"];function Qi(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",13),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_error.data)("columns",t.image_error.columns)("autoReload",-1)("status",t.tableStatus)}}function zi(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",13),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_syncing.data)("columns",t.image_syncing.columns)("autoReload",-1)("status",t.tableStatus)}}function Ji(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"cd-table",13),e.NdJ("fetchData",function(){return e.CHM(t),e.oxw().refresh()}),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("data",t.image_ready.data)("columns",t.image_ready.columns)("autoReload",-1)("status",t.tableStatus)}}function Yi(n,s){if(1&n&&(e.TgZ(0,"span",14),e.ALo(1,"mirrorHealthColor"),e._uU(2),e.qZA()),2&n){const o=s.value;e.Q6J("ngClass",e.lcZ(1,2,s.row.state_color)),e.xp6(2),e.Oqu(o)}}function Vi(n,s){1&n&&e._UZ(0,"div")}function Ui(n,s){if(1&n&&e._UZ(0,"ngb-progressbar",18),2&n){const t=e.oxw().value;e.Q6J("value",t)("showValue",!0)}}function ji(n,s){if(1&n&&(e.YNc(0,Vi,1,0,"div",15),e.TgZ(1,"div",16),e.YNc(2,Ui,1,2,"ngb-progressbar",17),e.qZA()),2&n){const t=s.row;e.Q6J("ngIf","Replaying"===t.state),e.xp6(2),e.Q6J("ngIf","Replaying"===t.state)}}let Wi=(()=>{class n{constructor(t){this.rbdMirroringService=t,this.image_error={data:[],columns:{}},this.image_syncing={data:[],columns:{}},this.image_ready={data:[],columns:{}},this.tableStatus=new se.E}ngOnInit(){this.image_error.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"description",name:"Issue",flexGrow:4}],this.image_syncing.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"syncing_percent",name:"Progress",cellTemplate:this.progressTmpl,flexGrow:2},{prop:"bytes_per_second",name:"Bytes per second",flexGrow:2},{prop:"entries_behind_primary",name:"Entries behind primary",flexGrow:2}],this.image_ready.columns=[{prop:"pool_name",name:"Pool",flexGrow:2},{prop:"name",name:"Image",flexGrow:2},{prop:"state",name:"State",cellTemplate:this.stateTmpl,flexGrow:1},{prop:"description",name:"Description",flexGrow:4}],this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.image_error.data=t.content_data.image_error,this.image_syncing.data=t.content_data.image_syncing,this.image_ready.data=t.content_data.image_ready,this.tableStatus=new se.E(t.status)})}ngOnDestroy(){this.subs.unsubscribe()}refresh(){this.rbdMirroringService.refresh()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring-images"]],viewQuery:function(t,o){if(1&t&&(e.Gf(qi,7),e.Gf(Ki,7),e.Gf(Xi,7)),2&t){let 
i;e.iGM(i=e.CRH())&&(o.stateTmpl=i.first),e.iGM(i=e.CRH())&&(o.syncTmpl=i.first),e.iGM(i=e.CRH())&&(o.progressTmpl=i.first)}},decls:19,vars:4,consts:function(){let s,t,o;return s="Issues (" + "\ufffd0\ufffd" + ")",t="Syncing (" + "\ufffd0\ufffd" + ")",o="Ready (" + "\ufffd0\ufffd" + ")",[["ngbNav","","cdStatefulTab","image-list",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","issues"],["ngbNavLink",""],s,["ngbNavContent",""],["ngbNavItem","syncing"],t,["ngbNavItem","ready"],o,[3,"ngbNavOutlet"],["stateTmpl",""],["progressTmpl",""],["columnMode","flex",3,"data","columns","autoReload","status","fetchData"],[3,"ngClass"],[4,"ngIf"],[1,"w-100","h-100","d-flex","justify-content-center","align-items-center"],["type","info","class","w-100",3,"value","showValue",4,"ngIf"],["type","info",1,"w-100",3,"value","showValue"]]},template:function(t,o){if(1&t&&(e.TgZ(0,"nav",0,1),e.ynx(2,2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,Qi,1,4,"ng-template",5),e.BQk(),e.ynx(6,6),e.TgZ(7,"a",3),e.SDv(8,7),e.qZA(),e.YNc(9,zi,1,4,"ng-template",5),e.BQk(),e.ynx(10,8),e.TgZ(11,"a",3),e.SDv(12,9),e.qZA(),e.YNc(13,Ji,1,4,"ng-template",5),e.BQk(),e.qZA(),e._UZ(14,"div",10),e.YNc(15,Yi,3,4,"ng-template",null,11,e.W1O),e.YNc(17,ji,3,2,"ng-template",null,12,e.W1O)),2&t){const i=e.MAs(1);e.xp6(4),e.pQV(o.image_error.data.length),e.QtT(4),e.xp6(4),e.pQV(o.image_syncing.data.length),e.QtT(8),e.xp6(4),e.pQV(o.image_ready.data.length),e.QtT(12),e.xp6(2),e.Q6J("ngbNavOutlet",i)}},directives:[L.Pz,ht.m,L.nv,L.Vx,L.uN,W.a,L.tO,c.mk,c.O5,L.Ly],pipes:[_t],styles:[""]}),n})();function es(n,s){if(1&n&&e._UZ(0,"i",19),2&n){const t=e.oxw();e.Q6J("ngClass",t.icons.edit)}}function ts(n,s){if(1&n&&e._UZ(0,"i",19),2&n){const t=e.oxw();e.Q6J("ngClass",t.icons.check)}}let os=(()=>{class n{constructor(t,o,i,_){this.authStorageService=t,this.rbdMirroringService=o,this.modalService=i,this.taskWrapper=_,this.selection=new Re.r,this.peersExist=!0,this.subs=new _i.w,this.editing=!1,this.icons=T.P,this.permission=this.authStorageService.getPermissions().rbdMirroring;const r={permission:"update",icon:T.P.upload,click:()=>this.createBootstrapModal(),name:"Create Bootstrap Token",canBePrimary:()=>!0,disable:()=>!1},l={permission:"update",icon:T.P.download,click:()=>this.importBootstrapModal(),name:"Import Bootstrap Token",disable:()=>!1};this.tableActions=[r,l]}ngOnInit(){this.createForm(),this.subs.add(this.rbdMirroringService.startPolling()),this.subs.add(this.rbdMirroringService.subscribeSummary(t=>{this.status=t.content_data.status,this.peersExist=!!t.content_data.pools.find(o=>o.peer_uuids.length>0)})),this.rbdMirroringService.getSiteName().subscribe(t=>{this.siteName=t.site_name,this.rbdmirroringForm.get("siteName").setValue(this.siteName)})}createForm(){this.rbdmirroringForm=new Z.d({siteName:new a.NI({value:"",disabled:!0})})}ngOnDestroy(){this.subs.unsubscribe()}updateSiteName(){this.editing&&this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/mirroring/site_name/edit",{}),call:this.rbdMirroringService.setSiteName(this.rbdmirroringForm.getValue("siteName"))}).subscribe({complete:()=>{this.rbdMirroringService.refresh()}}),this.editing=!this.editing}createBootstrapModal(){this.modalRef=this.modalService.show(gi,{siteName:this.siteName})}importBootstrapModal(){this.modalRef=this.modalService.show(Mi,{siteName:this.siteName})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(q),e.Y36(de.Z),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-mirroring"]],decls:31,vars:10,consts:function(){let s,t,o,i;return s="Site 
Name",t="Daemons",o="Pools",i="Images",[["name","rbdmirroringForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"row","mb-3"],[1,"col-md-auto"],["for","siteName",1,"col-form-label"],s,[1,"col-sm-4","d-flex"],["type","text","id","siteName","name","siteName","formControlName","siteName",1,"form-control"],["id","editSiteName",1,"btn","btn-light",3,"click"],[3,"ngClass",4,"ngIf"],[3,"source","byId"],[1,"col"],[1,"table-actions","float-end",3,"permission","selection","tableActions"],[1,"row"],[1,"col-sm-6"],t,o,[1,"col-md-12"],i,[3,"ngClass"]]},template:function(t,o){1&t&&(e.TgZ(0,"form",0,1)(2,"div",2)(3,"div",3)(4,"label",4),e.SDv(5,5),e.qZA()(),e.TgZ(6,"div",6),e._UZ(7,"input",7),e.TgZ(8,"button",8),e.NdJ("click",function(){return o.updateSiteName()}),e.YNc(9,es,1,1,"i",9),e.YNc(10,ts,1,1,"i",9),e.qZA(),e._UZ(11,"cd-copy-2-clipboard-button",10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"cd-table-actions",12),e.qZA()()(),e.TgZ(14,"div",13)(15,"div",14)(16,"legend"),e.SDv(17,15),e.qZA(),e.TgZ(18,"div"),e._UZ(19,"cd-mirroring-daemons"),e.qZA()(),e.TgZ(20,"div",14)(21,"legend"),e.SDv(22,16),e.qZA(),e.TgZ(23,"div"),e._UZ(24,"cd-mirroring-pools"),e.qZA()()(),e.TgZ(25,"div",13)(26,"div",17)(27,"legend"),e.SDv(28,18),e.qZA(),e.TgZ(29,"div"),e._UZ(30,"cd-mirroring-images"),e.qZA()()()),2&t&&(e.Q6J("formGroup",o.rbdmirroringForm),e.xp6(7),e.uIk("disabled",!o.editing||null),e.xp6(1),e.uIk("title",o.editing?"Save":"Edit"),e.xp6(1),e.Q6J("ngIf",!o.editing),e.xp6(1),e.Q6J("ngIf",o.editing),e.xp6(1),e.Q6J("source",o.siteName)("byId",!1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[a._Y,a.JL,P.V,a.sg,$.o,a.Fj,K.b,a.JJ,a.u,c.O5,c.mk,Ye.s,Ee.K,Pi,ki,Wi],styles:[""]}),n})();class ns{}function is(n,s){if(1&n&&(e.TgZ(0,"option",16),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.id),e.xp6(1),e.Oqu(t.name)}}function ss(n,s){1&n&&(e.TgZ(0,"span",17),e.SDv(1,18),e.qZA())}let _s=(()=>{class n{constructor(t,o,i,_,r,l){this.activeModal=t,this.actionLabels=o,this.rbdMirroringService=i,this.taskWrapper=_,this.route=r,this.location=l,this.bsConfig={containerClass:"theme-default"},this.peerExists=!1,this.mirrorModes=[{id:"disabled",name:"Disabled"},{id:"pool",name:"Pool"},{id:"image",name:"Image"}],this.createForm()}createForm(){this.editModeForm=new Z.d({mirrorMode:new a.NI("",{validators:[a.kI.required,this.validateMode.bind(this)]})})}ngOnInit(){this.route.params.subscribe(t=>{this.poolName=t.pool_name}),this.pattern=`${this.poolName}`,this.rbdMirroringService.getPool(this.poolName).subscribe(t=>{this.setResponse(t)}),this.subs=this.rbdMirroringService.subscribeSummary(t=>{this.peerExists=!1;const i=t.content_data.pools.find(_=>this.poolName===_.name);this.peerExists=i&&i.peer_uuids.length})}ngOnDestroy(){this.subs.unsubscribe()}validateMode(t){return"disabled"===t.value&&this.peerExists?{cannotDisable:{value:t.value}}:null}setResponse(t){this.editModeForm.get("mirrorMode").setValue(t.mirror_mode)}update(){const t=new ns;t.mirror_mode=this.editModeForm.getValue("mirrorMode"),this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/mirroring/pool/edit",{pool_name:this.poolName}),call:this.rbdMirroringService.updatePool(this.poolName,t)}).subscribe({error:()=>this.editModeForm.setErrors({cdSubmitButton:!0}),complete:()=>{this.rbdMirroringService.refresh(),this.location.back()}})}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(L.Kz),e.Y36(D.p4),e.Y36(q),e.Y36(m.P),e.Y36(g.gz),e.Y36(c.Ye))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-pool-edit-mode-modal"]],decls:21,vars:7,consts:function(){let s,t,o,i;return s="Edit pool mirror mode",t="To edit the mirror mode for pool\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ", select a new mode from the list and click\xA0 " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Update" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ".",t=e.Zx4(t),o="Mode",i="Peer clusters must be removed prior to disabling mirror.",[["pageURL","mirroring",3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","editModeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","mirrorMode",1,"col-form-label"],o,["id","mirrorMode","name","mirrorMode","formControlName","mirrorMode",1,"form-select"],[3,"value",4,"ngFor","ngForOf"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[3,"value"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.ynx(8),e.tHW(9,7),e._UZ(10,"kbd")(11,"kbd"),e.N_p(),e.BQk(),e.qZA(),e.TgZ(12,"div",8)(13,"label",9)(14,"span"),e.SDv(15,10),e.qZA()(),e.TgZ(16,"select",11),e.YNc(17,is,2,2,"option",12),e.qZA(),e.YNc(18,ss,2,0,"span",13),e.qZA()(),e.TgZ(19,"div",14)(20,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.update()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.editModeForm),e.xp6(7),e.pQV(o.poolName),e.QtT(9),e.xp6(6),e.Q6J("ngForOf",o.mirrorModes),e.xp6(1),e.Q6J("ngIf",o.editModeForm.showError("mirrorMode",i,"cannotDisable")),e.xp6(2),e.Q6J("form",o.editModeForm)("submitText",o.actionLabels.UPDATE)}},directives:[f.z,a._Y,a.JL,P.V,a.sg,h.P,$.o,a.EJ,a.JJ,a.u,c.sg,a.YN,a.Kr,c.O5,j.p],styles:[""]}),n})();var At=p(7357),as=p(28049),rs=p(43190),Ve=p(80842),at=p(30633),$e=p(47557),ls=p(28211);class cs{}var Ie=(()=>{return(n=Ie||(Ie={}))[n.V1=1]="V1",n[n.V2=2]="V2",Ie;var n})();class ds{constructor(){this.features=[]}}class ps{constructor(){this.features=[]}}class ms extends class us{}{constructor(){super(...arguments),this.features=[]}}class rt{constructor(){this.features=[],this.remove_scheduling=!1}}var Ue=(()=>{return(n=Ue||(Ue={})).editing="editing",n.cloning="cloning",n.copying="copying",Ue;var n})(),Pt=p(18372),gs=p(17932),Ts=p(60950);function fs(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",58),e.SDv(2,59),e.ALo(3,"titlecase"),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",60)(6,"hr"),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(3),e.pQV(e.lcZ(3,1,t.action)),e.QtT(2)}}function Cs(n,s){1&n&&(e.TgZ(0,"span",61),e.ynx(1),e.SDv(2,62),e.BQk(),e.qZA())}function Ss(n,s){1&n&&(e.TgZ(0,"span",61),e.ynx(1),e.SDv(2,63),e.BQk(),e.qZA())}function Rs(n,s){1&n&&e._UZ(0,"input",64)}function Es(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,67),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ms(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,68),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Os(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,69),e.qZA()),2&n&&e.Q6J("ngValue",null)}function hs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function As(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"select",65),e.NdJ("change",function(){return 
e.CHM(t),e.oxw(2).setPoolMirrorMode()}),e.YNc(1,Es,2,1,"option",66),e.YNc(2,Ms,2,1,"option",66),e.YNc(3,Os,2,1,"option",66),e.YNc(4,hs,2,2,"option",46),e.qZA()}if(2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function Ps(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,71),e.qZA())}const Is=function(n,s){return[n,s]};function bs(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"div",20),e._UZ(2,"i",72),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(2),e.Q6J("ngClass",e.WLB(1,Is,t.icons.spinner,t.icons.spin))}}function Ns(n,s){1&n&&e._UZ(0,"input",76)}function Fs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,78),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ds(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,79),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ls(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,80),e.qZA()),2&n&&e.Q6J("ngValue",null)}function vs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function $s(n,s){if(1&n&&(e.TgZ(0,"select",77),e.YNc(1,Fs,2,1,"option",66),e.YNc(2,Ds,2,1,"option",66),e.YNc(3,Ls,2,1,"option",66),e.YNc(4,vs,2,2,"option",46),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.namespaces)}}function Bs(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",73),e._uU(2," Namespace "),e.qZA(),e.TgZ(3,"div",12),e.YNc(4,Ns,1,0,"input",74),e.YNc(5,$s,5,4,"select",75),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("ngIf","editing"===t.mode||!t.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==t.mode&&t.poolPermission.read)}}function Gs(n,s){1&n&&(e.TgZ(0,"cd-helper")(1,"span"),e.SDv(2,81),e.qZA()())}function ys(n,s){1&n&&e._UZ(0,"input",87)}function xs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,89),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Zs(n,s){1&n&&(e.TgZ(0,"option",50),e.SDv(1,90),e.qZA()),2&n&&e.Q6J("ngValue",null)}function ws(n,s){1&n&&(e.TgZ(0,"option",50),e._uU(1,"-- Select a data pool -- "),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Hs(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function ks(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"select",88),e.NdJ("change",function(i){return e.CHM(t),e.oxw(3).onDataPoolChange(i.target.value)}),e.YNc(1,xs,2,1,"option",66),e.YNc(2,Zs,2,1,"option",66),e.YNc(3,ws,2,1,"option",66),e.YNc(4,Hs,2,2,"option",46),e.qZA()}if(2&n){const t=e.oxw(3);e.xp6(1),e.Q6J("ngIf",null===t.dataPools),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&0===t.dataPools.length),e.xp6(1),e.Q6J("ngIf",null!==t.dataPools&&t.dataPools.length>0),e.xp6(1),e.Q6J("ngForOf",t.dataPools)}}function qs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,91),e.qZA())}const je=function(n){return{required:n}};function Ks(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",82)(2,"span",72),e.SDv(3,83),e.qZA(),e._UZ(4,"cd-helper",84),e.qZA(),e.TgZ(5,"div",12),e.YNc(6,ys,1,0,"input",85),e.YNc(7,ks,5,4,"select",86),e.YNc(8,qs,2,0,"span",14),e.qZA()()),2&n){e.oxw();const t=e.MAs(2),o=e.oxw();e.xp6(2),e.Q6J("ngClass",e.VKq(4,je,"editing"!==o.mode)),e.xp6(4),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("dataPool",t,"required"))}}function 
Xs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,92),e.qZA())}function Qs(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,93),e.qZA())}function zs(n,s){if(1&n&&e._UZ(0,"cd-helper",97),2&n){const t=e.oxw().$implicit;e.s9C("html",t.helperHtml)}}function Js(n,s){if(1&n&&(e.TgZ(0,"div",21),e._UZ(1,"input",94),e.TgZ(2,"label",95),e._uU(3),e.qZA(),e.YNc(4,zs,1,1,"cd-helper",96),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.s9C("id",t.key),e.s9C("name",t.key),e.s9C("formControlName",t.key),e.xp6(1),e.s9C("for",t.key),e.xp6(1),e.Oqu(t.desc),e.xp6(1),e.Q6J("ngIf",t.helperHtml)}}const It=function(n){return["edit",n]},bt=function(n){return{modal:n}},Nt=function(n){return{outlets:n}},Ft=function(n){return["/block/mirroring",n]};function Ys(n,s){if(1&n&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,98),e._UZ(3,"b")(4,"a",99),e.N_p(),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(4),e.Q6J("routerLink",e.VKq(7,Ft,e.VKq(5,Nt,e.VKq(3,bt,e.VKq(1,It,t.currentPoolName)))))}}function Vs(n,s){if(1&n&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,104),e._UZ(3,"b")(4,"a",99),e.N_p(),e.qZA()()),2&n){const t=e.oxw(4);e.xp6(4),e.Q6J("routerLink",e.VKq(7,Ft,e.VKq(5,Nt,e.VKq(3,bt,e.VKq(1,It,t.currentPoolName)))))}}function Us(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",101)(1,"input",102),e.NdJ("change",function(){return e.CHM(t),e.oxw(3).setExclusiveLock()}),e.qZA(),e.TgZ(2,"label",103),e._uU(3),e.ALo(4,"titlecase"),e.qZA(),e.YNc(5,Vs,5,9,"cd-helper",25),e.qZA()}if(2&n){const t=s.$implicit,o=e.oxw(3);e.xp6(1),e.Q6J("id",t)("value",t),e.uIk("disabled","pool"===o.poolMirrorMode&&"snapshot"===t||null),e.xp6(1),e.Q6J("for",t),e.xp6(1),e.Oqu(e.lcZ(4,6,t)),e.xp6(2),e.Q6J("ngIf","pool"===o.poolMirrorMode&&"snapshot"===t)}}function js(n,s){if(1&n&&(e.TgZ(0,"div"),e.YNc(1,Us,6,8,"div",100),e.qZA()),2&n){const t=e.oxw(2);e.xp6(1),e.Q6J("ngForOf",t.mirroringOptions)}}function Ws(n,s){if(1&n&&(e.TgZ(0,"div",9)(1,"label",105),e.tHW(2,106),e._UZ(3,"cd-helper",107),e.N_p(),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",108),e.qZA()()),2&n){const t=e.oxw(2);e.xp6(5),e.uIk("disabled",!1===t.peerConfigured||null)}}function e_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"a",109),e.NdJ("click",function(){return e.CHM(t),e.oxw(2).advancedEnabled=!0,!1}),e.SDv(1,110),e.qZA()}}function t_(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function o_(n,s){if(1&n&&(e.TgZ(0,"option",70),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function n_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,111),e.qZA())}function i_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,112),e.qZA())}function s_(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,113),e.qZA())}function __(n,s){1&n&&(e.TgZ(0,"span",61),e.SDv(1,114),e.qZA())}function a_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,fs,7,3,"div",8),e.TgZ(10,"div",9)(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,Cs,3,0,"span",14),e.YNc(16,Ss,3,0,"span",14),e.qZA()(),e.TgZ(17,"div",15),e.NdJ("change",function(i){return e.CHM(t),e.oxw().onPoolChange(i.target.value)}),e.TgZ(18,"label",16),e.SDv(19,17),e.qZA(),e.TgZ(20,"div",12),e.YNc(21,Rs,1,0,"input",18),e.YNc(22,As,5,4,"select",19),e.YNc(23,Ps,2,0,"span",14),e.qZA()(),e.YNc(24,bs,3,4,"div",8),e.YNc(25,Bs,6,2,"div",8),e.TgZ(26,"div",9)(27,"div",20)(28,"div",21)(29,"input",22),e.NdJ("change",function(){return 
e.CHM(t),e.oxw().onUseDataPoolChange()}),e.qZA(),e.TgZ(30,"label",23),e.SDv(31,24),e.qZA(),e.YNc(32,Gs,3,0,"cd-helper",25),e.qZA()()(),e.YNc(33,Ks,9,6,"div",8),e.TgZ(34,"div",9)(35,"label",26),e.SDv(36,27),e.qZA(),e.TgZ(37,"div",12),e._UZ(38,"input",28),e.YNc(39,Xs,2,0,"span",14),e.YNc(40,Qs,2,0,"span",14),e.qZA()(),e.TgZ(41,"div",29)(42,"label",30),e.SDv(43,31),e.qZA(),e.TgZ(44,"div",12),e.YNc(45,Js,5,6,"div",32),e.qZA()(),e.TgZ(46,"div",9)(47,"div",20)(48,"div",21)(49,"input",33),e.NdJ("change",function(){return e.CHM(t),e.oxw().setMirrorMode()}),e.qZA(),e.TgZ(50,"label",34),e._uU(51,"Mirroring"),e.qZA(),e.YNc(52,Ys,5,9,"cd-helper",25),e.qZA(),e.YNc(53,js,2,1,"div",25),e.qZA()(),e.YNc(54,Ws,6,1,"div",8),e.TgZ(55,"div",35)(56,"div",36),e.YNc(57,e_,2,0,"a",37),e.qZA()(),e.TgZ(58,"div",38)(59,"legend",39),e.SDv(60,40),e.qZA(),e.TgZ(61,"div",41)(62,"h4",39),e.SDv(63,42),e.qZA(),e.TgZ(64,"div",9)(65,"label",43),e.tHW(66,44),e._UZ(67,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(68,"div",12)(69,"select",45),e.YNc(70,t_,2,2,"option",46),e.qZA()()(),e.TgZ(71,"div",9)(72,"label",47),e.tHW(73,48),e._UZ(74,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(75,"div",12)(76,"select",49)(77,"option",50),e.SDv(78,51),e.qZA(),e.YNc(79,o_,2,2,"option",46),e.qZA(),e.YNc(80,n_,2,0,"span",14),e.YNc(81,i_,2,0,"span",14),e.qZA()(),e.TgZ(82,"div",9)(83,"label",52),e.tHW(84,53),e._UZ(85,"cd-helper"),e.N_p(),e.qZA(),e.TgZ(86,"div",12),e._UZ(87,"input",54),e.YNc(88,s_,2,0,"span",14),e.YNc(89,__,2,0,"span",14),e.qZA()()(),e.TgZ(90,"cd-rbd-configuration-form",55),e.NdJ("changes",function(i){return e.CHM(t),e.oxw().getDirtyConfigurationValues=i}),e.qZA()()(),e.TgZ(91,"div",56)(92,"cd-form-button-panel",57),e.NdJ("submitActionEvent",function(){return e.CHM(t),e.oxw().submit()}),e.ALo(93,"titlecase"),e.ALo(94,"upperFirst"),e.qZA()()()()()}if(2&n){const 
t=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.rbdForm),e.xp6(6),e.pQV(e.lcZ(6,35,o.action))(e.lcZ(7,37,o.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",o.rbdForm.getValue("parent")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("name",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("name",t,"pattern")),e.xp6(2),e.Q6J("ngClass",e.VKq(43,je,"editing"!==o.mode)),e.xp6(3),e.Q6J("ngIf","editing"===o.mode||!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("pool",t,"required")),e.xp6(1),e.Q6J("ngIf","editing"!==o.mode&&o.rbdForm.getValue("pool")&&null===o.namespaces),e.xp6(1),e.Q6J("ngIf","editing"===o.mode&&o.rbdForm.getValue("namespace")||"editing"!==o.mode&&(o.namespaces&&o.namespaces.length>0||!o.poolPermission.read)),e.xp6(7),e.Q6J("ngIf",o.allDataPools.length<=1),e.xp6(1),e.Q6J("ngIf",o.rbdForm.getValue("useDataPool")),e.xp6(6),e.Q6J("ngIf",o.rbdForm.showError("size",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("size",t,"invalidSizeObject")),e.xp6(5),e.Q6J("ngForOf",o.featuresList),e.xp6(7),e.Q6J("ngIf",!1===o.mirroring&&o.currentPoolName),e.xp6(1),e.Q6J("ngIf",o.mirroring),e.xp6(1),e.Q6J("ngIf","snapshot"===o.rbdForm.getValue("mirroringMode")&&o.mirroring),e.xp6(3),e.Q6J("ngIf",!o.advancedEnabled),e.xp6(1),e.Q6J("hidden",!o.advancedEnabled),e.xp6(12),e.Q6J("ngForOf",o.objectSizes),e.xp6(2),e.Q6J("ngClass",e.VKq(45,je,o.rbdForm.getValue("stripingCount"))),e.xp6(5),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.objectSizes),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingUnit",t,"invalidStripingUnit")),e.xp6(2),e.Q6J("ngClass",e.VKq(47,je,o.rbdForm.getValue("stripingUnit"))),e.xp6(5),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"required")),e.xp6(1),e.Q6J("ngIf",o.rbdForm.showError("stripingCount",t,"min")),e.xp6(1),e.Q6J("form",o.rbdForm)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",t)("submitText",e.lcZ(93,39,o.action)+" "+e.lcZ(94,41,o.resource))}}let Be=(()=>{class n extends k.E{constructor(t,o,i,_,r,l,d,u,S,E){super(),this.authStorageService=t,this.route=o,this.poolService=i,this.rbdService=_,this.formatter=r,this.taskWrapper=l,this.dimlessBinaryPipe=d,this.actionLabels=u,this.router=S,this.rbdMirroringService=E,this.namespaces=[],this.namespacesByPoolCache={},this.pools=null,this.allPools=null,this.dataPools=null,this.allDataPools=[],this.featuresList=[],this.initializeConfigData=new At.t(1),this.peerConfigured=!1,this.advancedEnabled=!1,this.rbdFormMode=Ue,this.defaultObjectSize="4 MiB",this.mirroringOptions=["journal","snapshot"],this.mirroring=!1,this.currentPoolName="",this.objectSizes=["4 KiB","8 KiB","16 KiB","32 KiB","64 KiB","128 KiB","256 KiB","512 KiB","1 MiB","2 MiB","4 MiB","8 MiB","16 MiB","32 MiB"],this.defaultStripingUnit="4 MiB",this.defaultStripingCount=1,this.rbdImage=new At.t(1),this.icons=T.P,this.routerUrl=this.router.url,this.poolPermission=this.authStorageService.getPermissions().pool,this.resource="RBD",this.features={"deep-flatten":{desc:"Deep flatten",requires:null,allowEnable:!1,allowDisable:!0},layering:{desc:"Layering",requires:null,allowEnable:!1,allowDisable:!1},"exclusive-lock":{desc:"Exclusive lock",requires:null,allowEnable:!0,allowDisable:!0},"object-map":{desc:"Object map (requires exclusive-lock)",requires:"exclusive-lock",allowEnable:!0,allowDisable:!0,initDisabled:!0},"fast-diff":{desc:"Fast diff (interlocked with 
object-map)",requires:"object-map",allowEnable:!0,allowDisable:!0,interlockedWith:"object-map",initDisabled:!0}},this.featuresList=this.objToArray(this.features),this.createForm()}objToArray(t){return C().map(t,(o,i)=>Object.assign(o,{key:i}))}createForm(){this.rbdForm=new Z.d({parent:new a.NI(""),name:new a.NI("",{validators:[a.kI.required,a.kI.pattern(/^[^@/]+?$/)]}),pool:new a.NI(null,{validators:[a.kI.required]}),namespace:new a.NI(null),useDataPool:new a.NI(!1),dataPool:new a.NI(null),size:new a.NI(null,{updateOn:"blur"}),obj_size:new a.NI(this.defaultObjectSize),features:new Z.d(this.featuresList.reduce((t,o)=>(t[o.key]=new a.NI({value:!1,disabled:!!o.initDisabled}),t),{})),mirroring:new a.NI(""),schedule:new a.NI("",{validators:[a.kI.pattern(/^([0-9]+)d|([0-9]+)h|([0-9]+)m$/)]}),mirroringMode:new a.NI(""),stripingUnit:new a.NI(this.defaultStripingUnit),stripingCount:new a.NI(this.defaultStripingCount,{updateOn:"blur"})},this.validateRbdForm(this.formatter))}disableForEdit(){this.rbdForm.get("parent").disable(),this.rbdForm.get("pool").disable(),this.rbdForm.get("namespace").disable(),this.rbdForm.get("useDataPool").disable(),this.rbdForm.get("dataPool").disable(),this.rbdForm.get("obj_size").disable(),this.rbdForm.get("stripingUnit").disable(),this.rbdForm.get("stripingCount").disable(),this.rbdImage.subscribe(t=>{t.image_format===Ie.V1&&(this.rbdForm.get("deep-flatten").disable(),this.rbdForm.get("layering").disable(),this.rbdForm.get("exclusive-lock").disable())})}disableForClone(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}disableForCopy(){this.rbdForm.get("parent").disable(),this.rbdForm.get("size").disable()}ngOnInit(){this.prepareFormForAction(),this.gatherNeededData().subscribe(this.handleExternalData.bind(this))}setExclusiveLock(){this.mirroring&&"journal"===this.rbdForm.get("mirroringMode").value?(this.rbdForm.get("exclusive-lock").setValue(!0),this.rbdForm.get("exclusive-lock").disable()):(this.rbdForm.get("exclusive-lock").enable(),"pool"===this.poolMirrorMode&&this.rbdForm.get("mirroringMode").setValue(this.mirroringOptions[0]))}setMirrorMode(){this.mirroring=!this.mirroring,this.setExclusiveLock(),this.checkPeersConfigured()}checkPeersConfigured(t){var o=t||this.rbdForm.get("pool").value;this.rbdMirroringService.getPeerForPool(o).subscribe(i=>{i.length>0&&(this.peerConfigured=!0)})}setPoolMirrorMode(){var t;this.currentPoolName=this.mode===this.rbdFormMode.editing?null===(t=this.response)||void 0===t?void 0:t.pool_name:this.rbdForm.getValue("pool"),this.currentPoolName&&(this.rbdMirroringService.refresh(),this.rbdMirroringService.subscribeSummary(o=>{const i=o.content_data.pools.find(_=>_.name===this.currentPoolName);this.poolMirrorMode=i.mirror_mode,"disabled"===i.mirror_mode&&(this.mirroring=!1,this.rbdForm.get("mirroring").setValue(this.mirroring),this.rbdForm.get("mirroring").disable())})),this.setExclusiveLock()}prepareFormForAction(){const t=this.routerUrl;t.startsWith("/block/rbd/edit")?(this.mode=this.rbdFormMode.editing,this.action=this.actionLabels.EDIT,this.disableForEdit()):t.startsWith("/block/rbd/clone")?(this.mode=this.rbdFormMode.cloning,this.disableForClone(),this.action=this.actionLabels.CLONE):t.startsWith("/block/rbd/copy")?(this.mode=this.rbdFormMode.copying,this.action=this.actionLabels.COPY,this.disableForCopy()):this.action=this.actionLabels.CREATE,C().each(this.features,o=>{this.rbdForm.get("features").get(o.key).valueChanges.subscribe(i=>this.featureFormUpdate(o.key,i))})}gatherNeededData(){const t={};return 
this.mode?this.route.params.subscribe(o=>{const i=v.N.fromString(decodeURIComponent(o.image_spec));o.snap&&(this.snapName=decodeURIComponent(o.snap)),t.rbd=this.rbdService.get(i),this.checkPeersConfigured(i.poolName)}):t.defaultFeatures=this.rbdService.defaultFeatures(),this.mode!==this.rbdFormMode.editing&&this.poolPermission.read&&(t.pools=this.poolService.list(["pool_name","type","flags_names","application_metadata"])),(0,le.D)(t)}handleExternalData(t){if(this.handlePoolData(t.pools),this.setPoolMirrorMode(),t.defaultFeatures&&this.setFeatures(t.defaultFeatures),t.rbd){const o=t.rbd;this.setResponse(o,this.snapName),this.rbdImage.next(o)}this.loadingReady()}handlePoolData(t){if(!t)return;const o=[],i=[];for(const _ of t)this.rbdService.isRBDPool(_)&&("replicated"===_.type?(o.push(_),i.push(_)):"erasure"===_.type&&-1!==_.flags_names.indexOf("ec_overwrites")&&i.push(_));if(this.pools=o,this.allPools=o,this.dataPools=i,this.allDataPools=i,1===this.pools.length){const _=this.pools[0].pool_name;this.rbdForm.get("pool").setValue(_),this.onPoolChange(_)}this.allDataPools.length<=1&&this.rbdForm.get("useDataPool").disable()}onPoolChange(t){const o=this.rbdForm.get("dataPool");o.value===t&&o.setValue(null),this.dataPools=this.allDataPools?this.allDataPools.filter(i=>i.pool_name!==t):[],this.namespaces=null,t in this.namespacesByPoolCache?this.namespaces=this.namespacesByPoolCache[t]:this.rbdService.listNamespaces(t).subscribe(i=>{i=i.map(_=>_.namespace),this.namespacesByPoolCache[t]=i,this.namespaces=i}),this.rbdForm.get("namespace").setValue(null)}onUseDataPoolChange(){this.rbdForm.getValue("useDataPool")||(this.rbdForm.get("dataPool").setValue(null),this.onDataPoolChange(null))}onDataPoolChange(t){const o=this.allPools.filter(i=>i.pool_name!==t);this.rbdForm.getValue("pool")===t&&this.rbdForm.get("pool").setValue(null),this.pools=o}validateRbdForm(t){return o=>{const i=o.get("useDataPool"),_=o.get("dataPool");let r=null;i.value&&null==_.value&&(r={required:!0}),_.setErrors(r);const l=o.get("size"),d=o.get("obj_size"),u=t.toBytes(null!=d.value?d.value:this.defaultObjectSize),S=o.get("stripingCount"),E=null!=S.value?S.value:this.defaultStripingCount;let I=null;null===l.value?I={required:!0}:E*u>t.toBytes(l.value)&&(I={invalidSizeObject:!0}),l.setErrors(I);const B=o.get("stripingUnit");let y=null;null===B.value&&null!==S.value?y={required:!0}:null!==B.value&&t.toBytes(B.value)>u&&(y={invalidStripingUnit:!0}),B.setErrors(y);let Q=null;return null===S.value&&null!==B.value?Q={required:!0}:E<1&&(Q={min:!0}),S.setErrors(Q),null}}deepBoxCheck(t,o){this.getDependentChildFeatures(t).forEach(_=>{const r=this.rbdForm.get(_.key);o?r.enable({emitEvent:!1}):(r.disable({emitEvent:!1}),r.setValue(!1,{emitEvent:!1}),this.deepBoxCheck(_.key,o));const l=this.rbdForm.get("features");this.mode===this.rbdFormMode.editing&&l.get(_.key).enabled&&(-1!==this.response.features_name.indexOf(_.key)&&!_.allowDisable||-1===this.response.features_name.indexOf(_.key)&&!_.allowEnable)&&l.get(_.key).disable()})}getDependentChildFeatures(t){return C().filter(this.features,o=>o.requires===t)||[]}interlockCheck(t,o){const i=this.featuresList.find(_=>_.key===t);if(this.response){const _=null!=i.interlockedWith,r=this.featuresList.find(d=>d.interlockedWith===i.key),l=!!this.response.features_name.find(d=>d===i.key);if(_){if(l!==!!this.response.features_name.find(u=>u===i.interlockedWith))return}else 
if(r&&!!this.response.features_name.find(u=>u===r.key)!==l)return}o?C().filter(this.features,_=>_.interlockedWith===t).forEach(_=>this.rbdForm.get(_.key).setValue(!0,{emitEvent:!1})):i.interlockedWith&&this.rbdForm.get("features").get(i.interlockedWith).setValue(!1)}featureFormUpdate(t,o){if(o){const i=this.features[t].requires;if(i&&!this.rbdForm.getValue(i))return void this.rbdForm.get(`features.${t}`).setValue(!1)}this.deepBoxCheck(t,o),this.interlockCheck(t,o)}setFeatures(t){const o=this.rbdForm.get("features");C().forIn(this.features,i=>{-1!==t.indexOf(i.key)&&o.get(i.key).setValue(!0),this.featureFormUpdate(i.key,o.get(i.key).value)})}setResponse(t,o){this.response=t;const i=new v.N(t.pool_name,t.namespace,t.name).toString();if(this.mode===this.rbdFormMode.cloning)this.rbdForm.get("parent").setValue(`${i}@${o}`);else if(this.mode===this.rbdFormMode.copying)o?this.rbdForm.get("parent").setValue(`${i}@${o}`):this.rbdForm.get("parent").setValue(`${i}`);else if(t.parent){const _=t.parent;this.rbdForm.get("parent").setValue(`${_.pool_name}/${_.image_name}@${_.snap_name}`)}this.mode===this.rbdFormMode.editing&&(this.rbdForm.get("name").setValue(t.name),"snapshot"===(null==t?void 0:t.mirror_mode)||t.features_name.includes("journaling")?(this.mirroring=!0,this.rbdForm.get("mirroring").setValue(this.mirroring),this.rbdForm.get("mirroringMode").setValue(null==t?void 0:t.mirror_mode),this.rbdForm.get("schedule").setValue(null==t?void 0:t.schedule_interval)):(this.mirroring=!1,this.rbdForm.get("mirroring").setValue(this.mirroring)),this.setPoolMirrorMode()),this.rbdForm.get("pool").setValue(t.pool_name),this.onPoolChange(t.pool_name),this.rbdForm.get("namespace").setValue(t.namespace),t.data_pool&&(this.rbdForm.get("useDataPool").setValue(!0),this.rbdForm.get("dataPool").setValue(t.data_pool)),this.rbdForm.get("size").setValue(this.dimlessBinaryPipe.transform(t.size)),this.rbdForm.get("obj_size").setValue(this.dimlessBinaryPipe.transform(t.obj_size)),this.setFeatures(t.features_name),this.rbdForm.get("stripingUnit").setValue(this.dimlessBinaryPipe.transform(t.stripe_unit)),this.rbdForm.get("stripingCount").setValue(t.stripe_count),this.initializeConfigData.next({initialData:this.response.configuration,sourceType:at.h.image})}createRequest(){const t=new ms;return t.pool_name=this.rbdForm.getValue("pool"),t.namespace=this.rbdForm.getValue("namespace"),t.name=this.rbdForm.getValue("name"),t.schedule_interval=this.rbdForm.getValue("schedule"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),"image"===this.poolMirrorMode&&(t.mirror_mode=this.rbdForm.getValue("mirroringMode")),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(),t}addObjectSizeAndStripingToRequest(t){t.obj_size=this.formatter.toBytes(this.rbdForm.getValue("obj_size")),C().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),this.mirroring&&"journal"===this.rbdForm.getValue("mirroringMode")&&t.features.push("journaling"),t.stripe_unit=this.formatter.toBytes(this.rbdForm.getValue("stripingUnit")),t.stripe_count=this.rbdForm.getValue("stripingCount"),t.data_pool=this.rbdForm.getValue("dataPool")}createAction(){const t=this.createRequest();return this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/create",{pool_name:t.pool_name,namespace:t.namespace,image_name:t.name,schedule_interval:t.schedule_interval,start_time:t.start_time}),call:this.rbdService.create(t)})}editRequest(){const t=new 
rt;if(t.name=this.rbdForm.getValue("name"),t.schedule_interval=this.rbdForm.getValue("schedule"),t.name=this.rbdForm.getValue("name"),t.size=this.formatter.toBytes(this.rbdForm.getValue("size")),C().forIn(this.features,o=>{this.rbdForm.getValue(o.key)&&t.features.push(o.key)}),t.enable_mirror=this.rbdForm.getValue("mirroring"),t.enable_mirror)"journal"===this.rbdForm.getValue("mirroringMode")&&t.features.push("journaling"),"image"===this.poolMirrorMode&&(t.mirror_mode=this.rbdForm.getValue("mirroringMode"));else{const o=t.features.indexOf("journaling",0);o>-1&&t.features.splice(o,1)}return t.configuration=this.getDirtyConfigurationValues(),t}cloneRequest(){const t=new ds;return t.child_pool_name=this.rbdForm.getValue("pool"),t.child_namespace=this.rbdForm.getValue("namespace"),t.child_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,at.h.image),t}editAction(){const t=new v.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/edit",{image_spec:t.toString()}),call:this.rbdService.update(t,this.editRequest())})}cloneAction(){const t=this.cloneRequest(),o=new v.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/clone",{parent_image_spec:o.toString(),parent_snap_name:this.snapName,child_pool_name:t.child_pool_name,child_namespace:t.child_namespace,child_image_name:t.child_image_name}),call:this.rbdService.cloneSnapshot(o,this.snapName,t)})}copyRequest(){const t=new ps;return this.snapName&&(t.snapshot_name=this.snapName),t.dest_pool_name=this.rbdForm.getValue("pool"),t.dest_namespace=this.rbdForm.getValue("namespace"),t.dest_image_name=this.rbdForm.getValue("name"),this.addObjectSizeAndStripingToRequest(t),t.configuration=this.getDirtyConfigurationValues(!0,at.h.image),t}copyAction(){const t=this.copyRequest(),o=new v.N(this.response.pool_name,this.response.namespace,this.response.name);return this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/copy",{src_image_spec:o.toString(),dest_pool_name:t.dest_pool_name,dest_namespace:t.dest_namespace,dest_image_name:t.dest_image_name}),call:this.rbdService.copy(o,t)})}submit(){this.mode||this.rbdImage.next("create"),this.rbdImage.pipe((0,as.P)(),(0,rs.w)(()=>this.mode===this.rbdFormMode.editing?this.editAction():this.mode===this.rbdFormMode.cloning?this.cloneAction():this.mode===this.rbdFormMode.copying?this.copyAction():this.createAction())).subscribe(()=>{},()=>this.rbdForm.setErrors({cdSubmitButton:!0}),()=>this.router.navigate(["/block/rbd"]))}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(g.gz),e.Y36(Ve.q),e.Y36(H),e.Y36(ls.H),e.Y36(m.P),e.Y36($e.$),e.Y36(D.p4),e.Y36(g.F0),e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let s,t,o,i,_,r,l,d,u,S,E,I,B,y,Q,J,ee,te,w,_e,ae,O,ue,me,ge,Te,fe,Ce,Se,G,ye,xe,Ze,we,He,ke,qe,Ke,Xe,Qe,ze;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="Pool",i="Use a dedicated data pool",_="Size",r="e.g., 10GiB",l="Features",d="Advanced",u="Striping",S="Object size" + "\ufffd#67\ufffd" + "Objects in the Ceph Storage Cluster have a maximum configurable size (e.g., 2MB, 4MB, etc.). The object size should be large enough to accommodate many stripe units, and should be a multiple of the stripe unit." 
+ "\ufffd/#67\ufffd" + "",E="Stripe unit" + "\ufffd#74\ufffd" + "Stripes have a configurable unit size (e.g., 64kb). The Ceph Client divides the data it will write to objects into equally sized stripe units, except for the last stripe unit. A stripe width, should be a fraction of the Object Size so that an object may contain many stripe units." + "\ufffd/#74\ufffd" + "",I="-- Select stripe unit --",B="Stripe count" + "\ufffd#85\ufffd" + "The Ceph Client writes a sequence of stripe units over a series of objects determined by the stripe count. The series of objects is called an object set. After the Ceph Client writes to the last object in the object set, it returns to the first object in the object set." + "\ufffd/#85\ufffd" + "",y="" + "\ufffd0\ufffd" + " from",Q="This field is required.",J="'/' and '@' are not allowed.",ee="Loading...",te="-- No rbd pools available --",w="-- Select a pool --",_e="This field is required.",ae="Loading...",O="-- No namespaces available --",ue="-- Select a namespace --",me="You need more than one pool with the rbd application label use to use a dedicated data pool.",ge="Data pool",Te="Dedicated pool that stores the object-data of the RBD.",fe="Loading...",Ce="-- No data pools available --",Se="This field is required.",G="This field is required.",ye="You have to increase the size.",xe="You need to enable a " + "\ufffd#3\ufffd" + "mirror mode" + "\ufffd/#3\ufffd" + " in the selected pool. Please " + "\ufffd#4\ufffd" + "click here to select a mode and enable it in this pool." + "\ufffd/#4\ufffd" + "",Ze="You need to enable " + "\ufffd#3\ufffd" + "image mirror mode" + "\ufffd/#3\ufffd" + " in the selected pool. Please " + "\ufffd#4\ufffd" + "click here to select a mode and enable it in this pool." + "\ufffd/#4\ufffd" + "",we="Create Mirror-Snapshots automatically on a periodic basis. The interval can be specified in days, hours, or minutes using d, h, m suffix respectively. 
To create mirror snapshots, you must import or create and have available peers to mirror",He="Schedule Interval " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + "",ke="e.g., 12h or 1d or 10m",qe="Advanced...",Ke="This field is required because stripe count is defined!",Xe="Stripe unit is greater than object size.",Qe="This field is required because stripe unit is defined!",ze="Stripe count must be greater than 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","rbdForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],s,[1,"card-body"],["class","form-group row",4,"ngIf"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Name...","id","name","name","name","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"form-group","row",3,"change"],["for","pool",1,"cd-col-form-label",3,"ngClass"],o,["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-select","formControlName","pool",3,"change",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","id","useDataPool","name","useDataPool","formControlName","useDataPool",1,"custom-control-input",3,"change"],["for","useDataPool",1,"custom-control-label"],i,[4,"ngIf"],["for","size",1,"cd-col-form-label","required"],_,["id","size","name","size","type","text","formControlName","size","placeholder",r,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["formGroupName","features",1,"form-group","row"],["for","features",1,"cd-col-form-label"],l,["class","custom-control custom-checkbox",4,"ngFor","ngForOf"],["type","checkbox","id","mirroring","name","mirroring","formControlName","mirroring",1,"custom-control-input",3,"change"],["for","mirroring",1,"custom-control-label"],[1,"row"],[1,"col-sm-12"],["class","float-end margin-right-md","href","",3,"click",4,"ngIf"],[3,"hidden"],[1,"cd-header"],d,[1,"col-md-12"],u,["for","size",1,"cd-col-form-label"],S,["id","obj_size","name","obj_size","formControlName","obj_size",1,"form-select"],[3,"value",4,"ngFor","ngForOf"],["for","stripingUnit",1,"cd-col-form-label",3,"ngClass"],E,["id","stripingUnit","name","stripingUnit","formControlName","stripingUnit",1,"form-select"],[3,"ngValue"],I,["for","stripingCount",1,"cd-col-form-label",3,"ngClass"],B,["id","stripingCount","name","stripingCount","formControlName","stripingCount","type","number",1,"form-control"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","name",1,"cd-col-form-label"],y,["type","text","id","parent","name","parent","formControlName","parent",1,"form-control"],[1,"invalid-feedback"],Q,J,["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-select",3,"change"],[3,"ngValue",4,"ngIf"],ee,te,w,[3,"value"],_e,[3,"ngClass"],["for","pool",1,"cd-col-form-label"],["class","form-control","type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",4,"ngIf"],["id","namespace","name","namespace","class","form-select","formControlName","namespace",4,"ngIf"],["type","text","placeholder","Namespace...","id","namespace","name","namespace","formControlName","namespace",1,"form-control"],["id","namespace","name","namespace","formControlName","namespace",1,"form-select"],ae,O,ue,me,["for","dataPool",1,"cd-col-form-label"],ge,["html",Te],["class","form-control","type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",4,"ngIf"],["id","dataPool","name","dataPool","class","form-select","formControlName","dataPool",3,"change",4,"ngIf"],["type","text","placeholder","Data pool name...","id","dataPool","name","dataPool","formControlName","dataPool",1,"form-control"],["id","dataPool","name","dataPool","formControlName","dataPool",1,"form-select",3,"change"],fe,Ce,Se,G,ye,["type","checkbox",1,"custom-control-input",3,"id","name","formControlName"],[1,"custom-control-label",3,"for"],[3,"html",4,"ngIf"],[3,"html"],xe,[3,"routerLink"],["class","custom-control custom-radio ms-2",4,"ngFor","ngForOf"],[1,"custom-control","custom-radio","ms-2"],["type","radio","name","mirroringMode","formControlName","mirroringMode",1,"form-check-input",3,"id","value","change"],[1,"form-check-label",3,"for"],Ze,[1,"cd-col-form-label"],He,["html",we],["id","schedule","name","schedule","type","text","formControlName","schedule","placeholder",ke,1,"form-control"],["href","",1,"float-end","margin-right-md",3,"click"],qe,Ke,Xe,Qe,ze]},template:function(t,o){1&t&&e.YNc(0,a_,95,49,"div",0),2&t&&e.Q6J("cdFormLoading",o.loading)},directives:[gt.y,a._Y,a.JL,a.sg,P.V,c.O5,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Me.U,c.mk,a.EJ,a.YN,a.Kr,c.sg,a.Wl,Pt.S,gs.Q,a.x0,g.yS,a._,a.wV,Ts.d,j.p],pipes:[c.rS,tt.m],styles:[""]}),n})();var Dt=p(71225),lt=p(36169);let r_=(()=>{class n{constructor(){}static getCount(t){var o;return Number(null===(o=t.headers)||void 0===o?void 0:o.get("X-Total-Count"))}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275prov=e.Yz7({token:n,factory:n.\u0275fac,providedIn:"root"}),n})();var ct=p(51847),l_=p(16738),Oe=p.n(l_),dt=p(62862),c_=p(52266);function d_(n,s){1&n&&(e.TgZ(0,"div",18)(1,"span"),e.SDv(2,19),e.qZA()())}function p_(n,s){1&n&&(e.TgZ(0,"span",20),e.SDv(1,21),e.qZA())}function u_(n,s){1&n&&(e.TgZ(0,"span",20),e.SDv(1,22),e.qZA())}function m_(n,s){if(1&n&&e._UZ(0,"cd-date-time-picker",23),2&n){const t=e.oxw();e.Q6J("control",t.moveForm.get("expiresAt"))}}let g_=(()=>{class n{constructor(t,o,i,_,r){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=_,this.taskWrapper=r,this.createForm()}createForm(){this.moveForm=this.fb.group({expiresAt:["",[z.h.custom("format",t=>!(""===t||Oe()(t,"YYYY-MM-DD HH:mm:ss").isValid())),z.h.custom("expired",t=>Oe()().isAfter(t))]]})}ngOnInit(){this.imageSpec=new v.N(this.poolName,this.namespace,this.imageName),this.imageSpecStr=this.imageSpec.toString(),this.pattern=`${this.poolName}/${this.imageName}`}moveImage(){let t=0;const o=this.moveForm.getValue("expiresAt");o&&(t=Oe()(o,"YYYY-MM-DD HH:mm:ss").diff(Oe()(),"seconds",!0)),t<0&&(t=0),this.taskWrapper.wrapTaskAroundCall({task:new 
M.R("rbd/trash/move",{image_spec:this.imageSpecStr}),call:this.rbdService.moveTrash(this.imageSpec,t)}).subscribe({complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(H),e.Y36(L.Kz),e.Y36(D.p4),e.Y36(dt.O),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-move-modal"]],decls:23,vars:9,consts:function(){let s,t,o,i,_,r,l;return s="Move an image to trash",t="To move " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "" + "\ufffd0\ufffd" + "" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + " to trash, click " + "[\ufffd#10\ufffd|\ufffd#11\ufffd]" + "Move" + "[\ufffd/#10\ufffd|\ufffd/#11\ufffd]" + ". Optionally, you can pick an expiration date.",t=e.Zx4(t),o="Protection expires at",i="NOT PROTECTED",_="This image contains snapshot(s), which will prevent it from being removed after moved to trash.",r="Wrong date format. Please use \"YYYY-MM-DD HH:mm:ss\".",l="Protection has already expired. Please pick a future date or leave it empty.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","moveForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],["class","alert alert-warning","role","alert",4,"ngIf"],t,[1,"form-group"],["for","expiresAt",1,"col-form-label"],o,["type","text","placeholder",i,"formControlName","expiresAt","triggers","manual",1,"form-control",3,"ngbPopover","click","keypress"],["p","ngbPopover"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["popContent",""],["role","alert",1,"alert","alert-warning"],_,[1,"invalid-feedback"],r,l,[3,"control"]]},template:function(t,o){if(1&t){const i=e.EpF();e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6),e.YNc(7,d_,3,0,"div",7),e.TgZ(8,"p"),e.tHW(9,8),e._UZ(10,"kbd")(11,"kbd"),e.N_p(),e.qZA(),e.TgZ(12,"div",9)(13,"label",10),e.SDv(14,11),e.qZA(),e.TgZ(15,"input",12,13),e.NdJ("click",function(){return e.CHM(i),e.MAs(16).open()})("keypress",function(){return e.CHM(i),e.MAs(16).close()}),e.qZA(),e.YNc(17,p_,2,0,"span",14),e.YNc(18,u_,2,0,"span",14),e.qZA()(),e.TgZ(19,"div",15)(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return o.moveImage()}),e.qZA()()(),e.BQk(),e.qZA(),e.YNc(21,m_,1,1,"ng-template",null,17,e.W1O)}if(2&t){const i=e.MAs(5),_=e.MAs(22);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.moveForm),e.xp6(3),e.Q6J("ngIf",o.hasSnapshots),e.xp6(4),e.pQV(o.imageSpecStr),e.QtT(9),e.xp6(4),e.Q6J("ngbPopover",_),e.xp6(2),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"format")),e.xp6(1),e.Q6J("ngIf",o.moveForm.showError("expiresAt",i,"expired")),e.xp6(2),e.Q6J("form",o.moveForm)("submitText",o.actionLabels.MOVE)}},directives:[f.z,a._Y,a.JL,a.sg,P.V,c.O5,h.P,$.o,a.Fj,K.b,a.JJ,a.u,L.o8,j.p,c_.J],styles:[""]}),n})();const We=function(){return{exact:!0}};function T_(n,s){1&n&&(e.TgZ(0,"li",1)(1,"a",9),e.SDv(2,10),e.qZA()()),2&n&&(e.xp6(1),e.Q6J("routerLinkActiveOptions",e.DdM(1,We)))}let et=(()=>{class n{constructor(t){this.authStorageService=t,this.grafanaPermission=this.authStorageService.getPermissions().grafana}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-tabs"]],decls:11,vars:7,consts:function(){let s,t,o,i;return s="Images",t="Namespaces",o="Trash",i="Overall 
Performance",[[1,"nav","nav-tabs"],[1,"nav-item"],["routerLink","/block/rbd","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],s,["routerLink","/block/rbd/namespaces","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],t,["routerLink","/block/rbd/trash","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],o,["class","nav-item",4,"ngIf"],["routerLink","/block/rbd/performance","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],i]},template:function(t,o){1&t&&(e.TgZ(0,"ul",0)(1,"li",1)(2,"a",2),e.SDv(3,3),e.qZA()(),e.TgZ(4,"li",1)(5,"a",4),e.SDv(6,5),e.qZA()(),e.TgZ(7,"li",1)(8,"a",6),e.SDv(9,7),e.qZA()(),e.YNc(10,T_,3,2,"li",8),e.qZA()),2&t&&(e.xp6(2),e.Q6J("routerLinkActiveOptions",e.DdM(4,We)),e.xp6(3),e.Q6J("routerLinkActiveOptions",e.DdM(5,We)),e.xp6(3),e.Q6J("routerLinkActiveOptions",e.DdM(6,We)),e.xp6(2),e.Q6J("ngIf",o.grafanaPermission.read))},directives:[g.yS,g.Od,c.O5],styles:[""]}),n})();var f_=p(25917),Lt=p(51295),pt=p(60737),C_=p(74255),vt=p(71099),$t=p(79765);function S_(n,s){1&n&&(e.TgZ(0,"span",16),e.SDv(1,17),e.qZA())}function R_(n,s){if(1&n&&(e.TgZ(0,"span"),e.tHW(1,18),e._UZ(2,"b"),e.N_p(),e.qZA()),2&n){const t=e.oxw();e.xp6(2),e.pQV(t.imageName),e.QtT(1)}}function E_(n,s){1&n&&(e.TgZ(0,"cd-helper"),e.SDv(1,25),e.qZA())}function M_(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"div",7)(1,"div",20)(2,"div",21)(3,"input",22),e.NdJ("change",function(){return e.CHM(t),e.oxw(2).onMirrorCheckBoxChange()}),e.qZA(),e.TgZ(4,"label",23),e.SDv(5,24),e.qZA(),e.YNc(6,E_,2,0,"cd-helper",13),e.qZA()()()}if(2&n){const t=s.ngIf;e.xp6(3),e.uIk("disabled",!(t.length>0)||null),e.xp6(3),e.Q6J("ngIf",!t.length>0)}}function O_(n,s){if(1&n&&(e.ynx(0),e.YNc(1,M_,7,2,"div",19),e.ALo(2,"async"),e.BQk()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf",e.lcZ(2,1,t.peerConfigured$))}}let h_=(()=>{class n{constructor(t,o,i,_,r,l){this.activeModal=t,this.rbdService=o,this.taskManagerService=i,this.notificationService=_,this.actionLabels=r,this.rbdMirrorService=l,this.editing=!1,this.onSubmit=new $t.xQ,this.action=this.actionLabels.CREATE,this.resource="RBD Snapshot",this.createForm()}createForm(){this.snapshotForm=new Z.d({snapshotName:new a.NI("",{validators:[a.kI.required]}),mirrorImageSnapshot:new a.NI(!1,{})})}ngOnInit(){this.peerConfigured$=this.rbdMirrorService.getPeerForPool(this.poolName)}setSnapName(t){this.snapName=t,this.snapshotForm.get("snapshotName").setValue(t)}onMirrorCheckBoxChange(){!0===this.snapshotForm.getValue("mirrorImageSnapshot")?(this.snapshotForm.get("snapshotName").setValue(""),this.snapshotForm.get("snapshotName").clearValidators()):(this.snapshotForm.get("snapshotName").setValue(this.snapName),this.snapshotForm.get("snapshotName").setValidators([a.kI.required]))}setEditing(t=!0){this.editing=t,this.action=this.editing?this.actionLabels.RENAME:this.actionLabels.CREATE}editAction(){const t=this.snapshotForm.getValue("snapshotName"),o=new v.N(this.poolName,this.namespace,this.imageName),i=new M.R;i.name="rbd/snap/edit",i.metadata={image_spec:o.toString(),snapshot_name:t},this.rbdService.renameSnapshot(o,this.snapName,t).toPromise().then(()=>{this.taskManagerService.subscribe(i.name,i.metadata,_=>{this.notificationService.notifyTask(_)}),this.activeModal.close(),this.onSubmit.next(this.snapName)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}createAction(){const 
t=this.snapshotForm.getValue("snapshotName"),o=this.snapshotForm.getValue("mirrorImageSnapshot"),i=new v.N(this.poolName,this.namespace,this.imageName),_=new M.R;_.name="rbd/snap/create",_.metadata={image_spec:i.toString(),snapshot_name:t},this.rbdService.createSnapshot(i,t,o).toPromise().then(()=>{this.taskManagerService.subscribe(_.name,_.metadata,r=>{this.notificationService.notifyTask(r)}),this.activeModal.close(),this.onSubmit.next(t)}).catch(()=>{this.snapshotForm.setErrors({cdSubmitButton:!0})})}submit(){this.editing?this.editAction():this.createAction()}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(H),e.Y36(vt.k),e.Y36(ve.g),e.Y36(D.p4),e.Y36(q))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-form-modal"]],decls:21,vars:18,consts:function(){let s,t,o,i,_,r;return s="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",t="Name",o="This field is required.",i="Snapshot mode is enabled on image " + "\ufffd#2\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#2\ufffd" + ": snapshot names are auto generated",_="Mirror Image Snapshot",r="The peer must be registered to do this action.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","snapshotForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","snapshotName",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["type","text","placeholder","Snapshot name...","id","snapshotName","name","snapshotName","formControlName","snapshotName","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],o,i,["class","form-group row",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["type","checkbox","formControlName","mirrorImageSnapshot","name","mirrorImageSnapshot","id","mirrorImageSnapshot",1,"custom-control-input",3,"change"],["for","mirrorImageSnapshot",1,"custom-control-label"],_,r]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,S_,2,0,"span",12),e.YNc(15,R_,3,1,"span",13),e.qZA()(),e.YNc(16,O_,3,3,"ng-container",13),e.qZA(),e.TgZ(17,"div",14)(18,"cd-form-button-panel",15),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(19,"titlecase"),e.ALo(20,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,10,o.action))(e.lcZ(4,12,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.snapshotForm),e.xp6(7),e.uIk("disabled","snapshot"===o.mirroring&&!0===o.snapshotForm.getValue("mirrorImageSnapshot")||null),e.xp6(1),e.Q6J("ngIf",o.snapshotForm.showError("snapshotName",i,"required")),e.xp6(1),e.Q6J("ngIf","snapshot"===o.mirroring&&!0===o.snapshotForm.getValue("mirrorImageSnapshot")||null),e.xp6(1),e.Q6J("ngIf","snapshot"===o.mirroring||null),e.xp6(2),e.Q6J("form",o.snapshotForm)("submitText",e.lcZ(19,14,o.action)+" "+e.lcZ(20,16,o.resource))}},directives:[f.z,a._Y,a.JL,a.sg,P.V,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Me.U,c.O5,a.Wl,Pt.S,j.p],pipes:[c.rS,tt.m,c.Ov],styles:[""]}),n})();class 
A_{constructor(s,t,o){this.featuresName=t,this.cloneFormatVersion=1,o.cloneFormatVersion().subscribe(i=>{this.cloneFormatVersion=i}),this.create={permission:"create",icon:T.P.add,name:s.CREATE},this.rename={permission:"update",icon:T.P.edit,name:s.RENAME,disable:i=>this.disableForMirrorSnapshot(i)||!i.hasSingleSelection},this.protect={permission:"update",icon:T.P.lock,visible:i=>i.hasSingleSelection&&!i.first().is_protected,name:s.PROTECT,disable:i=>this.disableForMirrorSnapshot(i)},this.unprotect={permission:"update",icon:T.P.unlock,visible:i=>i.hasSingleSelection&&i.first().is_protected,name:s.UNPROTECT,disable:i=>this.disableForMirrorSnapshot(i)},this.clone={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>this.getCloneDisableDesc(i,this.featuresName)||this.disableForMirrorSnapshot(i),icon:T.P.clone,name:s.CLONE},this.copy={permission:"create",canBePrimary:i=>i.hasSingleSelection,disable:i=>!i.hasSingleSelection||i.first().cdExecuting||this.disableForMirrorSnapshot(i),icon:T.P.copy,name:s.COPY},this.rollback={permission:"update",icon:T.P.undo,name:s.ROLLBACK,disable:i=>this.disableForMirrorSnapshot(i)||!i.hasSingleSelection},this.deleteSnap={permission:"delete",icon:T.P.destroy,disable:i=>{const _=i.first();return!i.hasSingleSelection||_.cdExecuting||_.is_protected||this.disableForMirrorSnapshot(i)},name:s.DELETE},this.ordering=[this.create,this.rename,this.protect,this.unprotect,this.clone,this.copy,this.rollback,this.deleteSnap]}getCloneDisableDesc(s,t){return!(s.hasSingleSelection&&!s.first().cdExecuting)||((null==t?void 0:t.includes("layering"))?1===this.cloneFormatVersion&&!s.first().is_protected&&"Snapshot must be protected in order to clone.":"Parent image must support Layering")}disableForMirrorSnapshot(s){return s.hasSingleSelection&&"snapshot"===s.first().mirror_mode&&s.first().name.includes(".mirror.")}}class P_{}var Ge=p(96102);const I_=["nameTpl"],b_=["rollbackTpl"];function N_(n,s){if(1&n&&(e.ynx(0),e.SDv(1,3),e.BQk(),e.TgZ(2,"strong"),e._uU(3),e.qZA(),e._uU(4,".\n")),2&n){const t=s.$implicit;e.xp6(3),e.hij(" ",t.snapName,"")}}let F_=(()=>{class n{constructor(t,o,i,_,r,l,d,u,S,E,I){this.authStorageService=t,this.modalService=o,this.dimlessBinaryPipe=i,this.cdDatePipe=_,this.rbdService=r,this.taskManagerService=l,this.notificationService=d,this.summaryService=u,this.taskListService=S,this.actionLabels=E,this.cdr=I,this.snapshots=[],this.selection=new Re.r,this.builders={"rbd/snap/create":B=>{const y=new P_;return y.name=B.snapshot_name,y}},this.permission=this.authStorageService.getPermissions().rbdImage}ngOnInit(){this.columns=[{name:"Name",prop:"name",cellTransformation:Le.e.executing,flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"Provisioned",prop:"disk_usage",flexGrow:1,cellClass:"text-right",pipe:this.dimlessBinaryPipe},{name:"State",prop:"is_protected",flexGrow:1,cellTransformation:Le.e.badge,customTemplateConfig:{map:{true:{value:"PROTECTED",class:"badge-success"},false:{value:"UNPROTECTED",class:"badge-info"}}}},{name:"Created",prop:"timestamp",flexGrow:1,pipe:this.cdDatePipe}],this.imageSpec=new v.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions=new A_(this.actionLabels,this.featuresName,this.rbdService),this.rbdTableActions.create.click=()=>this.openCreateSnapshotModal(),this.rbdTableActions.rename.click=()=>this.openEditSnapshotModal(),this.rbdTableActions.protect.click=()=>this.toggleProtection(),this.rbdTableActions.unprotect.click=()=>this.toggleProtection();const 
t=()=>this.selection.first()&&`${this.imageSpec.toStringEncoded()}/${encodeURIComponent(this.selection.first().name)}`;this.rbdTableActions.clone.routerLink=()=>`/block/rbd/clone/${t()}`,this.rbdTableActions.copy.routerLink=()=>`/block/rbd/copy/${t()}`,this.rbdTableActions.rollback.click=()=>this.rollbackModal(),this.rbdTableActions.deleteSnap.click=()=>this.deleteSnapshotModal(),this.tableActions=this.rbdTableActions.ordering,this.taskListService.init(()=>(0,f_.of)(this.snapshots),null,_=>{Lt.T.updateChanged(this,{data:_})&&(this.cdr.detectChanges(),this.data=[...this.data])},()=>{Lt.T.updateChanged(this,{data:this.snapshots})&&(this.cdr.detectChanges(),this.data=[...this.data])},_=>["rbd/snap/create","rbd/snap/delete","rbd/snap/edit","rbd/snap/rollback"].includes(_.name)&&this.imageSpec.toString()===_.metadata.image_spec,(_,r)=>_.name===r.metadata.snapshot_name,this.builders)}ngOnChanges(){this.columns&&(this.imageSpec=new v.N(this.poolName,this.namespace,this.rbdName),this.rbdTableActions&&(this.rbdTableActions.featuresName=this.featuresName),this.taskListService.fetch())}openSnapshotModal(t,o=null){this.modalRef=this.modalService.show(h_,{mirroring:this.mirroring}),this.modalRef.componentInstance.poolName=this.poolName,this.modalRef.componentInstance.imageName=this.rbdName,this.modalRef.componentInstance.namespace=this.namespace,o?this.modalRef.componentInstance.setEditing():o=`${this.rbdName}_${Oe()().toISOString(!0)}`,this.modalRef.componentInstance.setSnapName(o),this.modalRef.componentInstance.onSubmit.subscribe(_=>{const r=new pt.o;r.name=t,r.metadata={image_spec:this.imageSpec.toString(),snapshot_name:_},this.summaryService.addRunningTask(r)})}openCreateSnapshotModal(){this.openSnapshotModal("rbd/snap/create")}openEditSnapshotModal(){this.openSnapshotModal("rbd/snap/edit",this.selection.first().name)}toggleProtection(){const t=this.selection.first().name,o=this.selection.first().is_protected,i=new M.R;i.name="rbd/snap/edit";const _=new v.N(this.poolName,this.namespace,this.rbdName);i.metadata={image_spec:_.toString(),snapshot_name:t},this.rbdService.protectSnapshot(_,t,!o).toPromise().then(()=>{const r=new pt.o;r.name=i.name,r.metadata=i.metadata,this.summaryService.addRunningTask(r),this.taskManagerService.subscribe(i.name,i.metadata,l=>{this.notificationService.notifyTask(l)})})}_asyncTask(t,o,i){const _=new M.R;_.name=o,_.metadata={image_spec:new v.N(this.poolName,this.namespace,this.rbdName).toString(),snapshot_name:i};const r=new v.N(this.poolName,this.namespace,this.rbdName);this.rbdService[t](r,i).toPromise().then(()=>{const l=new pt.o;l.name=_.name,l.metadata=_.metadata,this.summaryService.addRunningTask(l),this.modalRef.close(),this.taskManagerService.subscribe(l.name,l.metadata,d=>{this.notificationService.notifyTask(d)})}).catch(()=>{this.modalRef.componentInstance.stopLoadingSpinner()})}rollbackModal(){const t=this.selection.selected[0].name,o=new v.N(this.poolName,this.namespace,this.rbdName).toString(),i={titleText:"RBD snapshot rollback",buttonText:"Rollback",bodyTpl:this.rollbackTpl,bodyData:{snapName:`${o}@${t}`},onSubmit:()=>{this._asyncTask("rollbackSnapshot","rbd/snap/rollback",t)}};this.modalRef=this.modalService.show(lt.Y,i)}deleteSnapshotModal(){const t=this.selection.selected[0].name;this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD snapshot",itemNames:[t],submitAction:()=>this._asyncTask("deleteSnapshot","rbd/snap/delete",t)})}updateSelection(t){this.selection=t}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(oe.j),e.Y36(de.Z),e.Y36($e.$),e.Y36(Ge.N),e.Y36(H),e.Y36(vt.k),e.Y36(ve.g),e.Y36(C_.J),e.Y36(ce.j),e.Y36(D.p4),e.Y36(e.sBO))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-snapshot-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(I_,5),e.Gf(b_,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.rollbackTpl=i.first)}},inputs:{snapshots:"snapshots",featuresName:"featuresName",poolName:"poolName",namespace:"namespace",mirroring:"mirroring",primary:"primary",rbdName:"rbdName"},features:[e._Bn([ce.j]),e.TTD],decls:4,vars:5,consts:function(){let s;return s="You are about to rollback",[["columnMode","flex","selectionType","single",3,"data","columns","updateSelection"],[1,"table-actions",3,"permission","selection","tableActions"],["rollbackTpl",""],s]},template:function(t,o){1&t&&(e.TgZ(0,"cd-table",0),e.NdJ("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(1,"cd-table-actions",1),e.qZA(),e.YNc(2,N_,5,1,"ng-template",null,2,e.W1O)),2&t&&(e.Q6J("data",o.data)("columns",o.columns),e.xp6(1),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[W.a,Ee.K],styles:[""],changeDetection:0}),n})();var D_=p(42176),Bt=p(76317),L_=p(41039);const v_=["poolConfigurationSourceTpl"];function $_(n,s){1&n&&(e.ynx(0),e.tHW(1,3),e._UZ(2,"strong"),e.N_p(),e.BQk())}function B_(n,s){if(1&n&&(e.TgZ(0,"span")(1,"span",38),e._uU(2),e.qZA()()),2&n){const t=s.$implicit;e.xp6(2),e.Oqu(t)}}function G_(n,s){if(1&n&&(e.TgZ(0,"span")(1,"span",39),e.SDv(2,40),e.qZA()()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function y_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.disk_usage)," ")}}function x_(n,s){if(1&n&&(e.TgZ(0,"span")(1,"span",39),e.SDv(2,41),e.qZA()()),2&n){e.oxw(3);const t=e.MAs(1);e.xp6(1),e.Q6J("ngbTooltip",t)}}function Z_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,t.selection.total_disk_usage)," ")}}function w_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(4);e.xp6(1),e.hij("/",t.selection.parent.pool_namespace,"")}}function H_(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,w_,2,1,"span",1),e._uU(3),e.qZA()),2&n){const t=e.oxw(3);e.xp6(1),e.Oqu(t.selection.parent.pool_name),e.xp6(1),e.Q6J("ngIf",t.selection.parent.pool_namespace),e.xp6(1),e.AsE("/",t.selection.parent.image_name,"@",t.selection.parent.snap_name,"")}}function k_(n,s){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function 
q_(n,s){if(1&n&&(e.TgZ(0,"table",17)(1,"tbody")(2,"tr")(3,"td",18),e.SDv(4,19),e.qZA(),e.TgZ(5,"td",20),e._uU(6),e.qZA()(),e.TgZ(7,"tr")(8,"td",21),e.SDv(9,22),e.qZA(),e.TgZ(10,"td"),e._uU(11),e.qZA()(),e.TgZ(12,"tr")(13,"td",21),e.SDv(14,23),e.qZA(),e.TgZ(15,"td"),e._uU(16),e.ALo(17,"empty"),e.qZA()(),e.TgZ(18,"tr")(19,"td",21),e.SDv(20,24),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.ALo(23,"cdDate"),e.qZA()(),e.TgZ(24,"tr")(25,"td",21),e.SDv(26,25),e.qZA(),e.TgZ(27,"td"),e._uU(28),e.ALo(29,"dimlessBinary"),e.qZA()(),e.TgZ(30,"tr")(31,"td",21),e.SDv(32,26),e.qZA(),e.TgZ(33,"td"),e._uU(34),e.ALo(35,"dimless"),e.qZA()(),e.TgZ(36,"tr")(37,"td",21),e.SDv(38,27),e.qZA(),e.TgZ(39,"td"),e._uU(40),e.ALo(41,"dimlessBinary"),e.qZA()(),e.TgZ(42,"tr")(43,"td",21),e.SDv(44,28),e.qZA(),e.TgZ(45,"td"),e.YNc(46,B_,3,1,"span",29),e.qZA()(),e.TgZ(47,"tr")(48,"td",21),e.SDv(49,30),e.qZA(),e.TgZ(50,"td"),e.YNc(51,G_,3,1,"span",1),e.YNc(52,y_,3,3,"span",1),e.qZA()(),e.TgZ(53,"tr")(54,"td",21),e.SDv(55,31),e.qZA(),e.TgZ(56,"td"),e.YNc(57,x_,3,1,"span",1),e.YNc(58,Z_,3,3,"span",1),e.qZA()(),e.TgZ(59,"tr")(60,"td",21),e.SDv(61,32),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.ALo(64,"dimlessBinary"),e.qZA()(),e.TgZ(65,"tr")(66,"td",21),e.SDv(67,33),e.qZA(),e.TgZ(68,"td"),e._uU(69),e.qZA()(),e.TgZ(70,"tr")(71,"td",21),e.SDv(72,34),e.qZA(),e.TgZ(73,"td"),e.YNc(74,H_,4,4,"span",1),e.YNc(75,k_,2,0,"span",1),e.qZA()(),e.TgZ(76,"tr")(77,"td",21),e.SDv(78,35),e.qZA(),e.TgZ(79,"td"),e._uU(80),e.qZA()(),e.TgZ(81,"tr")(82,"td",21),e.SDv(83,36),e.qZA(),e.TgZ(84,"td"),e._uU(85),e.qZA()(),e.TgZ(86,"tr")(87,"td",21),e.SDv(88,37),e.qZA(),e.TgZ(89,"td"),e._uU(90),e.qZA()()()()),2&n){const t=e.oxw(2);e.xp6(6),e.Oqu(t.selection.name),e.xp6(5),e.Oqu(t.selection.pool_name),e.xp6(5),e.Oqu(e.lcZ(17,19,t.selection.data_pool)),e.xp6(6),e.Oqu(e.lcZ(23,21,t.selection.timestamp)),e.xp6(6),e.Oqu(e.lcZ(29,23,t.selection.size)),e.xp6(6),e.Oqu(e.lcZ(35,25,t.selection.num_objs)),e.xp6(6),e.Oqu(e.lcZ(41,27,t.selection.obj_size)),e.xp6(6),e.Q6J("ngForOf",t.selection.features_name),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Q6J("ngIf",-1===(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(1),e.Q6J("ngIf",-1!==(null==t.selection.features_name?null:t.selection.features_name.indexOf("fast-diff"))),e.xp6(5),e.Oqu(e.lcZ(64,29,t.selection.stripe_unit)),e.xp6(6),e.Oqu(t.selection.stripe_count),e.xp6(5),e.Q6J("ngIf",t.selection.parent),e.xp6(1),e.Q6J("ngIf",!t.selection.parent),e.xp6(5),e.Oqu(t.selection.block_name_prefix),e.xp6(5),e.Oqu(t.selection.order),e.xp6(5),e.Oqu(t.selection.image_format)}}function K_(n,s){if(1&n&&e._UZ(0,"cd-rbd-snapshot-list",42),2&n){const t=e.oxw(2);e.Q6J("snapshots",t.selection.snapshots)("featuresName",t.selection.features_name)("poolName",t.selection.pool_name)("primary",t.selection.primary)("namespace",t.selection.namespace)("mirroring",t.selection.mirror_mode)("rbdName",t.selection.name)}}function X_(n,s){if(1&n&&e._UZ(0,"cd-rbd-configuration-table",43),2&n){const t=e.oxw(2);e.Q6J("data",t.selection.configuration)}}function Q_(n,s){if(1&n&&e._UZ(0,"cd-grafana",44),2&n){const t=e.oxw(2);e.Q6J("grafanaPath",t.rbdDashboardUrl)("type","metrics")}}function 
z_(n,s){if(1&n&&(e.ynx(0),e.TgZ(1,"nav",4,5),e.ynx(3,6),e.TgZ(4,"a",7),e.SDv(5,8),e.qZA(),e.YNc(6,q_,91,31,"ng-template",9),e.BQk(),e.ynx(7,10),e.TgZ(8,"a",7),e.SDv(9,11),e.qZA(),e.YNc(10,K_,1,7,"ng-template",9),e.BQk(),e.ynx(11,12),e.TgZ(12,"a",7),e.SDv(13,13),e.qZA(),e.YNc(14,X_,1,1,"ng-template",9),e.BQk(),e.ynx(15,14),e.TgZ(16,"a",7),e.SDv(17,15),e.qZA(),e.YNc(18,Q_,1,2,"ng-template",9),e.BQk(),e.qZA(),e._UZ(19,"div",16),e.BQk()),2&n){const t=e.MAs(2);e.xp6(19),e.Q6J("ngbNavOutlet",t)}}function J_(n,s){1&n&&(e.ynx(0),e.TgZ(1,"cd-alert-panel",45),e.SDv(2,46),e.qZA(),e.BQk())}function Y_(n,s){1&n&&(e.ynx(0),e.TgZ(1,"strong",49),e.SDv(2,50),e.qZA(),e.BQk())}function V_(n,s){1&n&&(e.TgZ(0,"span",51),e.SDv(1,52),e.qZA())}function U_(n,s){if(1&n&&(e.YNc(0,Y_,3,0,"ng-container",47),e.YNc(1,V_,2,0,"ng-template",null,48,e.W1O)),2&n){const t=s.value,o=e.MAs(2);e.Q6J("ngIf",+t)("ngIfElse",o)}}let j_=(()=>{class n{ngOnChanges(){this.selection&&(this.rbdDashboardUrl=`rbd-details?var-Pool=${this.selection.pool_name}&var-Image=${this.selection.name}`)}}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-details"]],viewQuery:function(t,o){if(1&t&&(e.Gf(v_,7),e.Gf(L.Pz,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.poolConfigurationSourceTpl=i.first),e.iGM(i=e.CRH())&&(o.nav=i.first)}},inputs:{selection:"selection",images:"images"},features:[e.TTD],decls:6,vars:2,consts:function(){let s,t,o,i,_,r,l,d,u,S,E,I,B,y,Q,J,ee,te,w,_e,ae,O,ue,me,ge,Te,fe,Ce,Se;return s="Only available for RBD images with " + "\ufffd#2\ufffd" + "fast-diff" + "\ufffd/#2\ufffd" + " enabled",t="Details",o="Snapshots",i="Configuration",_="Performance",r="Name",l="Pool",d="Data Pool",u="Created",S="Size",E="Objects",I="Object size",B="Features",y="Provisioned",Q="Total provisioned",J="Striping unit",ee="Striping count",te="Parent",w="Block name prefix",_e="Order",ae="Format Version",O="N/A",ue="N/A",me="RBD details",ge="Information can not be displayed for RBD in status 'Removing'.",Te="This setting overrides the global value",fe="Image",Ce="This is the global value. 
No value for this option has been set for this image.",Se="Global",[["usageNotAvailableTooltipTpl",""],[4,"ngIf"],["poolConfigurationSourceTpl",""],s,["ngbNav","","cdStatefulTab","rbd-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],t,["ngbNavContent",""],["ngbNavItem","snapshots"],o,["ngbNavItem","configuration"],i,["ngbNavItem","performance"],_,[3,"ngbNavOutlet"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],r,[1,"w-75"],[1,"bold"],l,d,u,S,E,I,B,[4,"ngFor","ngForOf"],y,Q,J,ee,te,w,_e,ae,[1,"badge","badge-dark","me-2"],["placement","top",1,"form-text","text-muted",3,"ngbTooltip"],O,ue,[3,"snapshots","featuresName","poolName","primary","namespace","mirroring","rbdName"],[3,"data"],["title",me,"uid","YhCYGcuZz","grafanaStyle","one",3,"grafanaPath","type"],["type","warning"],ge,[4,"ngIf","ngIfElse"],["global",""],["ngbTooltip",Te],fe,["ngbTooltip",Ce],Se]},template:function(t,o){1&t&&(e.YNc(0,$_,3,0,"ng-template",null,0,e.W1O),e.YNc(2,z_,20,1,"ng-container",1),e.YNc(3,J_,3,0,"ng-container",1),e.YNc(4,U_,3,2,"ng-template",null,2,e.W1O)),2&t&&(e.xp6(2),e.Q6J("ngIf",o.selection&&"REMOVING"!==o.selection.source),e.xp6(1),e.Q6J("ngIf",o.selection&&"REMOVING"===o.selection.source))},directives:[c.O5,L.Pz,ht.m,L.nv,L.Vx,L.uN,c.sg,L._L,F_,D_.P,Bt.F,L.tO,nt.G],pipes:[L_.W,Ge.N,$e.$,it.n],styles:[""]}),n})();const W_=["usageTpl"],ea=["parentTpl"],ta=["nameTpl"],oa=["ScheduleTpl"],na=["mirroringTpl"],ia=["flattenTpl"],sa=["deleteTpl"],_a=["removingStatTpl"],aa=["provisionedNotAvailableTooltipTpl"],ra=["totalProvisionedNotAvailableTooltipTpl"],la=["forcePromoteConfirmation"];function ca(n,s){1&n&&e._UZ(0,"div",14),2&n&&e.Q6J("innerHtml","Only available for RBD images with fast-diff enabled",e.oJD)}function da(n,s){if(1&n&&(e.TgZ(0,"span",17),e.SDv(1,18),e.qZA()),2&n){const t=e.oxw(2);e.Q6J("ngbTooltip",t.usageNotAvailableTooltipTpl)}}function pa(n,s){if(1&n&&(e.SDv(0,19),e.ALo(1,"dimlessBinary")),2&n){const t=e.oxw().row;e.xp6(1),e.pQV(e.lcZ(1,1,t.disk_usage)),e.QtT(0)}}function ua(n,s){if(1&n&&(e.YNc(0,da,2,1,"span",15),e.YNc(1,pa,2,3,"ng-template",null,16,e.W1O)),2&n){const t=s.row,o=e.MAs(2);e.Q6J("ngIf",null===t.disk_usage&&!t.features_name.includes("fast-diff"))("ngIfElse",o)}}function ma(n,s){if(1&n&&(e.TgZ(0,"span",17),e.SDv(1,21),e.qZA()),2&n){const t=e.oxw(2);e.Q6J("ngbTooltip",t.usageNotAvailableTooltipTpl)}}function ga(n,s){if(1&n&&(e.SDv(0,22),e.ALo(1,"dimlessBinary")),2&n){const t=e.oxw().row;e.xp6(1),e.pQV(e.lcZ(1,1,t.total_disk_usage)),e.QtT(0)}}function Ta(n,s){if(1&n&&(e.YNc(0,ma,2,1,"span",15),e.YNc(1,ga,2,3,"ng-template",null,20,e.W1O)),2&n){const t=s.row,o=e.MAs(2);e.Q6J("ngIf",null===t.total_disk_usage&&!t.features_name.includes("fast-diff"))("ngIfElse",o)}}function fa(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.qZA()),2&n){const t=e.oxw(2).value;e.xp6(1),e.hij("/",t.pool_namespace,"")}}function Ca(n,s){if(1&n&&(e.TgZ(0,"span"),e._uU(1),e.YNc(2,fa,2,1,"span",23),e._uU(3),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t.pool_name),e.xp6(1),e.Q6J("ngIf",t.pool_namespace),e.xp6(1),e.AsE("/",t.image_name,"@",t.snap_name,"")}}function Sa(n,s){1&n&&(e.TgZ(0,"span"),e._uU(1,"-"),e.qZA())}function Ra(n,s){if(1&n&&(e.YNc(0,Ca,4,4,"span",23),e.YNc(1,Sa,2,0,"span",23)),2&n){const t=s.value;e.Q6J("ngIf",t),e.xp6(1),e.Q6J("ngIf",!t)}}function Ea(n,s){if(1&n&&(e.TgZ(0,"span",27),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t[0])}}function Ma(n,s){if(1&n&&(e.TgZ(0,"span",27),e._uU(1),e.qZA()),2&n){const 
t=e.oxw().value;e.xp6(1),e.Oqu(t[1])}}function Oa(n,s){1&n&&(e.TgZ(0,"span",27),e.SDv(1,28),e.qZA())}function ha(n,s){1&n&&(e.TgZ(0,"span",27),e.SDv(1,29),e.qZA())}function Aa(n,s){if(1&n&&(e.TgZ(0,"span",27),e._uU(1),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(t)}}function Pa(n,s){if(1&n&&(e.YNc(0,Ea,2,1,"span",24),e._uU(1,"\xa0 "),e.YNc(2,Ma,2,1,"span",25),e._uU(3,"\xa0 "),e.YNc(4,Oa,2,0,"span",25),e.YNc(5,ha,2,0,"span",25),e.YNc(6,Aa,2,1,"ng-template",null,26,e.W1O)),2&n){const t=s.value,o=s.row,i=e.MAs(7);e.Q6J("ngIf",3===t.length)("ngIfElse",i),e.xp6(2),e.Q6J("ngIf",3===t.length),e.xp6(2),e.Q6J("ngIf",!0===o.primary),e.xp6(1),e.Q6J("ngIf",!1===o.primary)}}function Ia(n,s){if(1&n&&(e.TgZ(0,"span",27),e._uU(1),e.ALo(2,"cdDate"),e.qZA()),2&n){const t=e.oxw().value;e.xp6(1),e.Oqu(e.lcZ(2,1,t[2]))}}function ba(n,s){1&n&&e.YNc(0,Ia,3,3,"span",25),2&n&&e.Q6J("ngIf",3===s.value.length)}function Na(n,s){if(1&n&&(e._uU(0," You are about to flatten "),e.TgZ(1,"strong"),e._uU(2),e.qZA(),e._uU(3,". "),e._UZ(4,"br")(5,"br"),e._uU(6," All blocks will be copied from parent "),e.TgZ(7,"strong"),e._uU(8),e.qZA(),e._uU(9," to child "),e.TgZ(10,"strong"),e._uU(11),e.qZA(),e._uU(12,".\n")),2&n){const t=s.$implicit;e.xp6(2),e.Oqu(t.child),e.xp6(6),e.Oqu(t.parent),e.xp6(3),e.Oqu(t.child)}}function Fa(n,s){if(1&n&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.xp6(1),e.Oqu(t)}}function Da(n,s){if(1&n&&(e.ynx(0),e.TgZ(1,"span"),e.SDv(2,33),e.qZA(),e.TgZ(3,"ul"),e.YNc(4,Fa,2,1,"li",34),e.qZA(),e.BQk()),2&n){const t=e.oxw(2).snapshots;e.xp6(4),e.Q6J("ngForOf",t)}}function La(n,s){if(1&n&&(e.TgZ(0,"div",31)(1,"span"),e.SDv(2,32),e.qZA(),e._UZ(3,"br"),e.YNc(4,Da,5,1,"ng-container",23),e.qZA()),2&n){const t=e.oxw().snapshots;e.xp6(4),e.Q6J("ngIf",t.length>0)}}function va(n,s){1&n&&e.YNc(0,La,5,1,"div",30),2&n&&e.Q6J("ngIf",s.hasSnapshots)}const $a=function(n,s){return[n,s]};function Ba(n,s){if(1&n&&e._UZ(0,"i",36),2&n){const t=e.oxw(2);e.Q6J("ngClass",e.WLB(1,$a,t.icons.spinner,t.icons.spin))}}function Ga(n,s){if(1&n&&(e.TgZ(0,"span",36),e._uU(1),e.qZA()),2&n){const t=e.oxw(),o=t.column,i=t.row;e.Q6J("ngClass",null!=o&&null!=o.customTemplateConfig&&o.customTemplateConfig.executingClass?o.customTemplateConfig.executingClass:"text-muted italic"),e.xp6(1),e.hij(" (",i.cdExecuting,") ")}}function ya(n,s){if(1&n&&e._UZ(0,"i",38),2&n){const t=e.oxw(2);e.Gre("",t.icons.warning," warn")}}function xa(n,s){if(1&n&&(e.YNc(0,Ba,1,4,"i",35),e.TgZ(1,"span",36),e._uU(2),e.qZA(),e.YNc(3,Ga,2,2,"span",35),e.YNc(4,ya,1,3,"i",37)),2&n){const t=s.column,o=s.value,i=s.row;e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngClass",null==t||null==t.customTemplateConfig?null:t.customTemplateConfig.valueClass),e.xp6(1),e.hij(" ",o," "),e.xp6(1),e.Q6J("ngIf",i.cdExecuting),e.xp6(1),e.Q6J("ngIf",i.source&&"REMOVING"===i.source)}}function Za(n,s){if(1&n&&(e.TgZ(0,"cd-alert-panel",39),e._uU(1),e.qZA(),e.TgZ(2,"div",40),e.tHW(3,41),e._UZ(4,"strong"),e.N_p(),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Oqu(t.errorMessage)}}let Ha=(()=>{class n extends Ct.o{constructor(t,o,i,_,r,l,d,u,S){super(),this.authStorageService=t,this.rbdService=o,this.dimlessBinaryPipe=i,this.dimlessPipe=_,this.modalService=r,this.taskWrapper=l,this.taskListService=d,this.urlBuilder=u,this.actionLabels=S,this.tableStatus=new Dt.c("light"),this.selection=new 
Re.r,this.icons=T.P,this.count=0,this.tableContext=null,this.builders={"rbd/create":O=>this.createRbdFromTask(O.pool_name,O.namespace,O.image_name),"rbd/delete":O=>this.createRbdFromTaskImageSpec(O.image_spec),"rbd/clone":O=>this.createRbdFromTask(O.child_pool_name,O.child_namespace,O.child_image_name),"rbd/copy":O=>this.createRbdFromTask(O.dest_pool_name,O.dest_namespace,O.dest_image_name)},this.permission=this.authStorageService.getPermissions().rbdImage;const E=()=>this.selection.first()&&new v.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name).toStringEncoded();this.tableActions=[{permission:"create",icon:T.P.add,routerLink:()=>this.urlBuilder.getCreate(),canBePrimary:O=>!O.hasSingleSelection,name:this.actionLabels.CREATE},{permission:"update",icon:T.P.edit,routerLink:()=>this.urlBuilder.getEdit(E()),name:this.actionLabels.EDIT,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)},{permission:"create",canBePrimary:O=>O.hasSingleSelection,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||!!O.first().cdExecuting,icon:T.P.copy,routerLink:()=>`/block/rbd/copy/${E()}`,name:this.actionLabels.COPY},{permission:"update",disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||O.first().cdExecuting||!O.first().parent,icon:T.P.flatten,click:()=>this.flattenRbdModal(),name:this.actionLabels.FLATTEN},{permission:"update",icon:T.P.refresh,click:()=>this.resyncRbdModal(),name:this.actionLabels.RESYNC,disable:O=>this.getResyncDisableDesc(O)},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteRbdModal(),name:this.actionLabels.DELETE,disable:O=>this.getDeleteDisableDesc(O)},{permission:"delete",icon:T.P.trash,click:()=>this.trashRbdModal(),name:this.actionLabels.TRASH,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||O.first().image_format===Ie.V1},{permission:"update",icon:T.P.edit,click:()=>this.removeSchedulingModal(),name:this.actionLabels.REMOVE_SCHEDULING,disable:O=>this.getRemovingStatusDesc(O)||this.getInvalidNameDisable(O)||void 0===O.first().schedule_info},{permission:"update",icon:T.P.edit,click:()=>this.actionPrimary(!0),name:this.actionLabels.PROMOTE,visible:()=>null!=this.selection.first()&&!this.selection.first().primary},{permission:"update",icon:T.P.edit,click:()=>this.actionPrimary(!1),name:this.actionLabels.DEMOTE,visible:()=>null!=this.selection.first()&&this.selection.first().primary}]}createRbdFromTaskImageSpec(t){const o=v.N.fromString(t);return this.createRbdFromTask(o.poolName,o.namespace,o.imageName)}createRbdFromTask(t,o,i){const _=new cs;return _.id="-1",_.unique_id="-1",_.name=i,_.namespace=o,_.pool_name=t,_.image_format=Ie.V2,_}ngOnInit(){this.columns=[{name:"Name",prop:"name",flexGrow:2,cellTemplate:this.removingStatTpl},{name:"Pool",prop:"pool_name",flexGrow:2},{name:"Namespace",prop:"namespace",flexGrow:2},{name:"Size",prop:"size",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessBinaryPipe},{name:"Objects",prop:"num_objs",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessPipe},{name:"Object size",prop:"obj_size",flexGrow:1,cellClass:"text-right",sortable:!1,pipe:this.dimlessBinaryPipe},{name:"Provisioned",prop:"disk_usage",cellClass:"text-center",flexGrow:1,pipe:this.dimlessBinaryPipe,sortable:!1,cellTemplate:this.provisionedNotAvailableTooltipTpl},{name:"Total 
provisioned",prop:"total_disk_usage",cellClass:"text-center",flexGrow:1,pipe:this.dimlessBinaryPipe,sortable:!1,cellTemplate:this.totalProvisionedNotAvailableTooltipTpl},{name:"Parent",prop:"parent",flexGrow:2,sortable:!1,cellTemplate:this.parentTpl},{name:"Mirroring",prop:"mirror_mode",flexGrow:3,sortable:!1,cellTemplate:this.mirroringTpl},{name:"Next Scheduled Snapshot",prop:"mirror_mode",flexGrow:3,sortable:!1,cellTemplate:this.ScheduleTpl}],this.taskListService.init(i=>this.getRbdImages(i),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/clone","rbd/copy","rbd/create","rbd/delete","rbd/edit","rbd/flatten","rbd/trash/move"].includes(i.name),(i,_)=>{let r;switch(_.name){case"rbd/copy":r=new v.N(_.metadata.dest_pool_name,_.metadata.dest_namespace,_.metadata.dest_image_name).toString();break;case"rbd/clone":r=new v.N(_.metadata.child_pool_name,_.metadata.child_namespace,_.metadata.child_image_name).toString();break;case"rbd/create":r=new v.N(_.metadata.pool_name,_.metadata.namespace,_.metadata.image_name).toString();break;default:r=_.metadata.image_spec}return r===new v.N(i.pool_name,i.namespace,i.name).toString()},this.builders)}onFetchError(){this.table.reset(),this.tableStatus=new Dt.c("danger")}getRbdImages(t){var o;return null!==t&&(this.tableContext=t),null==this.tableContext&&(this.tableContext=new A.E(()=>{})),this.rbdService.list(null===(o=this.tableContext)||void 0===o?void 0:o.toParams())}prepareResponse(t){let o=[];return t.forEach(i=>{o=o.concat(i.value)}),o.forEach(i=>{if(void 0!==i.schedule_info){let _=[];const r="scheduled";let l=+new Date(i.schedule_info.schedule_time);const d=(new Date).getTimezoneOffset();l+=6e4*Math.abs(d),_.push(i.mirror_mode,r,l),i.mirror_mode=_,_=[]}}),this.count=o.length>0?r_.getCount(t[0]):0,o}updateSelection(t){this.selection=t}deleteRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=new v.N(t,o,i);this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD",itemNames:[_],bodyTemplate:this.deleteTpl,bodyContext:{hasSnapshots:this.hasSnapshots(),snapshots:this.listProtectedSnapshots()},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/delete",{image_spec:_.toString()}),call:this.rbdService.delete(_)})})}resyncRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=new v.N(t,o,i);this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD",itemNames:[_],actionDescription:"resync",submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/edit",{image_spec:_.toString()}),call:this.rbdService.update(_,{resync:!0})})})}trashRbdModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,hasSnapshots:this.hasSnapshots()};this.modalRef=this.modalService.show(g_,t)}flattenRbd(t){this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/flatten",{image_spec:t.toString()}),call:this.rbdService.flatten(t)}).subscribe({complete:()=>{this.modalRef.close()}})}flattenRbdModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().name,_=this.selection.first().parent,r=new v.N(_.pool_name,_.pool_namespace,_.image_name),l=new v.N(t,o,i),d={titleText:"RBD 
flatten",buttonText:"Flatten",bodyTpl:this.flattenTpl,bodyData:{parent:`${r}@${_.snap_name}`,child:l.toString()},onSubmit:()=>{this.flattenRbd(l)}};this.modalRef=this.modalService.show(lt.Y,d)}editRequest(){const t=new rt;return t.remove_scheduling=!t.remove_scheduling,t}removeSchedulingModal(){const t=this.selection.first().name,o=new v.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name);this.modalRef=this.modalService.show(pe.M,{actionDescription:"remove scheduling on",itemDescription:"image",itemNames:[`${t}`],submitActionObservable:()=>new Ot.y(i=>{this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/edit",{image_spec:o.toString()}),call:this.rbdService.update(o,this.editRequest())}).subscribe({error:_=>i.error(_),complete:()=>{this.modalRef.close()}})})})}actionPrimary(t){const o=new rt;o.primary=t,o.features=null;const i=new v.N(this.selection.first().pool_name,this.selection.first().namespace,this.selection.first().name);this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/edit",{image_spec:i.toString()}),call:this.rbdService.update(i,o)}).subscribe(()=>{},_=>{_.preventDefault(),t&&(this.errorMessage=_.error.detail.replace(/\[.*?\]\s*/,""),o.force=!0,this.modalRef=this.modalService.show(lt.Y,{titleText:"Warning",buttonText:"Enforce",warning:!0,bodyTpl:this.forcePromoteConfirmation,onSubmit:()=>{this.rbdService.update(i,o).subscribe(()=>{this.modalRef.close()},()=>{this.modalRef.close()})}}))})}hasSnapshots(){return(this.selection.first().snapshots||[]).length>0}hasClonedSnapshots(t){return(t.snapshots||[]).some(i=>i.children&&i.children.length>0)}listProtectedSnapshots(){return this.selection.first().snapshots.reduce((i,_)=>(_.is_protected&&i.push(_.name),i),[])}getDeleteDisableDesc(t){const o=t.first();return o&&this.hasClonedSnapshots(o)?"This RBD has cloned snapshots. 
Please delete related RBDs before deleting this RBD.":this.getInvalidNameDisable(t)||this.hasClonedSnapshots(t.first())}getResyncDisableDesc(t){const o=t.first();return o&&this.imageIsPrimary(o)?"Primary RBD images cannot be resynced":this.getInvalidNameDisable(t)}imageIsPrimary(t){return t.primary}getInvalidNameDisable(t){var o;const i=t.first();return(null===(o=null==i?void 0:i.name)||void 0===o?void 0:o.match(/[@/]/))?"This RBD image has an invalid name and can't be managed by ceph.":!t.first()||!t.hasSingleSelection}getRemovingStatusDesc(t){const o=t.first();return"REMOVING"===(null==o?void 0:o.source)&&"Action not possible for an RBD in status 'Removing'"}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36($e.$),e.Y36(it.n),e.Y36(de.Z),e.Y36(m.P),e.Y36(ce.j),e.Y36(ct.F),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(W.a,7),e.Gf(W_,5),e.Gf(ea,7),e.Gf(ta,5),e.Gf(oa,7),e.Gf(na,7),e.Gf(ia,7),e.Gf(sa,7),e.Gf(_a,7),e.Gf(aa,7),e.Gf(ra,7),e.Gf(la,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.usageTpl=i.first),e.iGM(i=e.CRH())&&(o.parentTpl=i.first),e.iGM(i=e.CRH())&&(o.nameTpl=i.first),e.iGM(i=e.CRH())&&(o.ScheduleTpl=i.first),e.iGM(i=e.CRH())&&(o.mirroringTpl=i.first),e.iGM(i=e.CRH())&&(o.flattenTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first),e.iGM(i=e.CRH())&&(o.removingStatTpl=i.first),e.iGM(i=e.CRH())&&(o.provisionedNotAvailableTooltipTpl=i.first),e.iGM(i=e.CRH())&&(o.totalProvisionedNotAvailableTooltipTpl=i.first),e.iGM(i=e.CRH())&&(o.forcePromoteConfirmation=i.first)}},features:[e._Bn([ce.j,{provide:ct.F,useValue:new ct.F("block/rbd")}]),e.qOj],decls:25,vars:13,consts:function(){let s,t,o,i,_,r,l,d,u,S;return s="N/A",t="" + "\ufffd0\ufffd" + "",o="N/A",i="" + "\ufffd0\ufffd" + "",_="primary",r="secondary",l="Deleting this image will also delete all its snapshots.",d="The following snapshots are currently protected and will be removed:",u="RBD in status 'Removing'",S="" + "\ufffd#4\ufffd" + " Do you want to force the operation? 
" + "\ufffd/#4\ufffd" + "",[["columnMode","flex","identifier","unique_id","forceIdentifier","true","selectionType","single",3,"data","columns","searchableObjects","serverSide","count","hasDetails","status","maxLimit","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["scheduleStatus",""],["provisionedNotAvailableTooltipTpl",""],["totalProvisionedNotAvailableTooltipTpl",""],["parentTpl",""],["mirroringTpl",""],["ScheduleTpl",""],["flattenTpl",""],["deleteTpl",""],["removingStatTpl",""],["forcePromoteConfirmation",""],[3,"innerHtml"],["placement","top",3,"ngbTooltip",4,"ngIf","ngIfElse"],["provisioned",""],["placement","top",3,"ngbTooltip"],s,t,["totalProvisioned",""],o,i,[4,"ngIf"],["class","badge badge-info",4,"ngIf","ngIfElse"],["class","badge badge-info",4,"ngIf"],["probb",""],[1,"badge","badge-info"],_,r,["class","alert alert-warning","role","alert",4,"ngIf"],["role","alert",1,"alert","alert-warning"],l,d,[4,"ngFor","ngForOf"],[3,"ngClass",4,"ngIf"],[3,"ngClass"],["title",u,3,"class",4,"ngIf"],["title",u],["type","warning"],[1,"m-4"],S]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0,1),e.NdJ("fetchData",function(_){return o.taskListService.fetch(_)})("setExpandedRow",function(_){return o.setExpandedRow(_)})("updateSelection",function(_){return o.updateSelection(_)}),e._UZ(3,"cd-table-actions",2)(4,"cd-rbd-details",3),e.qZA(),e.YNc(5,ca,1,1,"ng-template",null,4,e.W1O),e.YNc(7,ua,3,2,"ng-template",null,5,e.W1O),e.YNc(9,Ta,3,2,"ng-template",null,6,e.W1O),e.YNc(11,Ra,2,2,"ng-template",null,7,e.W1O),e.YNc(13,Pa,8,5,"ng-template",null,8,e.W1O),e.YNc(15,ba,1,1,"ng-template",null,9,e.W1O),e.YNc(17,Na,13,3,"ng-template",null,10,e.W1O),e.YNc(19,va,1,1,"ng-template",null,11,e.W1O),e.YNc(21,xa,5,5,"ng-template",null,12,e.W1O),e.YNc(23,Za,5,1,"ng-template",null,13,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("searchableObjects",!0)("serverSide",!0)("count",o.count)("hasDetails",!0)("status",o.tableStatus)("maxLimit",25)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("selection",o.expandedRow))},directives:[et,W.a,Ee.K,j_,c.O5,L._L,c.sg,c.mk,nt.G],pipes:[$e.$,Ge.N],styles:[".warn[_ngcontent-%COMP%]{color:#d48200}"]}),n})();function ka(n,s){1&n&&e._UZ(0,"input",19)}function qa(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,24),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Ka(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,25),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Xa(n,s){1&n&&(e.TgZ(0,"option",23),e.SDv(1,26),e.qZA()),2&n&&e.Q6J("ngValue",null)}function Qa(n,s){if(1&n&&(e.TgZ(0,"option",27),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t.pool_name),e.xp6(1),e.Oqu(t.pool_name)}}function za(n,s){if(1&n&&(e.TgZ(0,"select",20),e.YNc(1,qa,2,1,"option",21),e.YNc(2,Ka,2,1,"option",21),e.YNc(3,Xa,2,1,"option",21),e.YNc(4,Qa,2,2,"option",22),e.qZA()),2&n){const t=e.oxw();e.xp6(1),e.Q6J("ngIf",null===t.pools),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&0===t.pools.length),e.xp6(1),e.Q6J("ngIf",null!==t.pools&&t.pools.length>0),e.xp6(1),e.Q6J("ngForOf",t.pools)}}function Ja(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,29),e.qZA())}function Ya(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,30),e.qZA())}function Va(n,s){1&n&&(e.TgZ(0,"span",28),e.SDv(1,31),e.qZA())}let Ua=(()=>{class 
n{constructor(t,o,i,_,r,l){this.activeModal=t,this.actionLabels=o,this.authStorageService=i,this.notificationService=_,this.poolService=r,this.rbdService=l,this.pools=null,this.editing=!1,this.poolPermission=this.authStorageService.getPermissions().pool,this.createForm()}createForm(){this.namespaceForm=new Z.d({pool:new a.NI(""),namespace:new a.NI("")},this.validator(),this.asyncValidator())}validator(){return t=>{const o=t.get("pool"),i=t.get("namespace");let _=null;o.value||(_={required:!0}),o.setErrors(_);let r=null;return i.value||(r={required:!0}),i.setErrors(r),null}}asyncValidator(){return t=>new Promise(o=>{const i=t.get("pool"),_=t.get("namespace");this.rbdService.listNamespaces(i.value).subscribe(r=>{if(r.some(l=>l.namespace===_.value)){const l={namespaceExists:!0};_.setErrors(l),o(l)}else o(null)})})}ngOnInit(){this.onSubmit=new $t.xQ,this.poolPermission.read&&this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{const o=[];for(const i of t)this.rbdService.isRBDPool(i)&&"replicated"===i.type&&o.push(i);if(this.pools=o,1===this.pools.length){const i=this.pools[0].pool_name;this.namespaceForm.get("pool").setValue(i)}})}submit(){const t=this.namespaceForm.getValue("pool"),o=this.namespaceForm.getValue("namespace"),i=new M.R;i.name="rbd/namespace/create",i.metadata={pool:t,namespace:o},this.rbdService.createNamespace(t,o).toPromise().then(()=>{this.notificationService.show(ot.k.success,"Created namespace '" + t + "/" + o + "'"),this.activeModal.close(),this.onSubmit.next()}).catch(()=>{this.namespaceForm.setErrors({cdSubmitButton:!0})})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(L.Kz),e.Y36(D.p4),e.Y36(oe.j),e.Y36(ve.g),e.Y36(Ve.q),e.Y36(H))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-form-modal"]],decls:23,vars:9,consts:function(){let s,t,o,i,_,r,l,d,u;return s="Create Namespace",t="Pool",o="Name",i="Loading...",_="-- No rbd pools available --",r="-- Select a pool --",l="This field is required.",d="This field is required.",u="Namespace already exists.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","namespaceForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","pool",1,"cd-col-form-label","required"],t,[1,"cd-col-form-input"],["class","form-control","type","text","placeholder","Pool name...","id","pool","name","pool","formControlName","pool",4,"ngIf"],["id","pool","name","pool","class","form-select","formControlName","pool",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","namespace",1,"cd-col-form-label","required"],o,["type","text","placeholder","Namespace name...","id","namespace","name","namespace","formControlName","namespace","autofocus","",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder","Pool 
name...","id","pool","name","pool","formControlName","pool",1,"form-control"],["id","pool","name","pool","formControlName","pool",1,"form-select"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],i,_,r,[3,"value"],[1,"invalid-feedback"],l,d,u]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"div",7)(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e.YNc(11,ka,1,0,"input",11),e.YNc(12,za,5,4,"select",12),e.YNc(13,Ja,2,0,"span",13),e.qZA()(),e.TgZ(14,"div",7)(15,"label",14),e.SDv(16,15),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",16),e.YNc(19,Ya,2,0,"span",13),e.YNc(20,Va,2,0,"span",13),e.qZA()()(),e.TgZ(21,"div",17)(22,"cd-form-button-panel",18),e.NdJ("submitActionEvent",function(){return o.submit()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.namespaceForm),e.xp6(7),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("pool",i,"required")),e.xp6(6),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"required")),e.xp6(1),e.Q6J("ngIf",o.namespaceForm.showError("namespace",i,"namespaceExists")),e.xp6(2),e.Q6J("form",o.namespaceForm)("submitText",o.actionLabels.CREATE)}},directives:[f.z,a._Y,a.JL,a.sg,P.V,h.P,c.O5,$.o,a.Fj,K.b,a.JJ,a.u,a.EJ,a.YN,a.Kr,c.sg,Me.U,j.p],styles:[""]}),n})(),ja=(()=>{class n{constructor(t,o,i,_,r,l){this.authStorageService=t,this.rbdService=o,this.poolService=i,this.modalService=_,this.notificationService=r,this.actionLabels=l,this.selection=new Re.r,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"create",icon:T.P.add,click:()=>this.createModal(),name:this.actionLabels.CREATE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE,disable:()=>this.getDeleteDisableDesc()}]}ngOnInit(){this.columns=[{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Pool",prop:"pool",flexGrow:1},{name:"Total images",prop:"num_images",flexGrow:1}],this.refresh()}refresh(){this.poolService.list(["pool_name","type","application_metadata"]).then(t=>{t=t.filter(i=>this.rbdService.isRBDPool(i)&&"replicated"===i.type);const o=[];t.forEach(i=>{o.push(this.rbdService.listNamespaces(i.pool_name))}),o.length>0?(0,le.D)(o).subscribe(i=>{const _=[];for(let r=0;r{_.push({id:`${d}/${u.namespace}`,pool:d,namespace:u.namespace,num_images:u.num_images})})}this.namespaces=_}):this.namespaces=[]})}updateSelection(t){this.selection=t}createModal(){this.modalRef=this.modalService.show(Ua),this.modalRef.componentInstance.onSubmit.subscribe(()=>{this.refresh()})}deleteModal(){const t=this.selection.first().pool,o=this.selection.first().namespace;this.modalRef=this.modalService.show(pe.M,{itemDescription:"Namespace",itemNames:[`${t}/${o}`],submitAction:()=>this.rbdService.deleteNamespace(t,o).subscribe(()=>{this.notificationService.show(ot.k.success,"Deleted namespace '" + t + "/" + o + "'"),this.modalRef.close(),this.refresh()},()=>{this.modalRef.componentInstance.stopLoadingSpinner()})})}getDeleteDisableDesc(){var t;const o=this.selection.first();return(null==o?void 0:o.num_images)>0?"Namespace contains images":!(null===(t=this.selection)||void 0===t?void 0:t.first())}}return n.\u0275fac=function(t){return 
new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(Ve.q),e.Y36(de.Z),e.Y36(ve.g),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-namespace-list"]],features:[e._Bn([ce.j])],decls:4,vars:5,consts:[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"]],template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.refresh()})("updateSelection",function(_){return o.updateSelection(_)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.qZA()()),2&t&&(e.xp6(1),e.Q6J("data",o.namespaces)("columns",o.columns),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions))},directives:[et,W.a,Ee.K],styles:[""]}),n})(),Wa=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-performance"]],decls:2,vars:2,consts:function(){let s;return s="RBD overview",[["title",s,"uid","41FrpeUiz","grafanaStyle","two",3,"grafanaPath","type"]]},template:function(t,o){1&t&&e._UZ(0,"cd-rbd-tabs")(1,"cd-grafana",0),2&t&&(e.xp6(1),e.Q6J("grafanaPath","rbd-overview?")("type","metrics"))},directives:[et,Bt.F],styles:[""]}),n})();var er=p(91801);function tr(n,s){1&n&&e._UZ(0,"input",15)}function or(n,s){if(1&n&&(e.TgZ(0,"option",20),e._uU(1),e.qZA()),2&n){const t=s.$implicit;e.Q6J("value",t),e.xp6(1),e.Oqu(t)}}function nr(n,s){if(1&n&&(e.TgZ(0,"select",16)(1,"option",17),e.SDv(2,18),e.qZA(),e.YNc(3,or,2,2,"option",19),e.qZA()),2&n){const t=e.oxw();e.xp6(3),e.Q6J("ngForOf",t.pools)}}let ir=(()=>{class n{constructor(t,o,i,_,r,l,d){this.authStorageService=t,this.rbdService=o,this.activeModal=i,this.actionLabels=_,this.fb=r,this.poolService=l,this.taskWrapper=d,this.poolPermission=this.authStorageService.getPermissions().pool}createForm(){this.purgeForm=this.fb.group({poolName:""})}ngOnInit(){this.poolPermission.read&&this.poolService.list(["pool_name","application_metadata"]).then(t=>{this.pools=t.filter(o=>o.application_metadata.includes("rbd")).map(o=>o.pool_name)}),this.createForm()}purge(){const t=this.purgeForm.getValue("poolName")||"";this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/trash/purge",{pool_name:t}),call:this.rbdService.purgeTrash(t)}).subscribe({error:()=>{this.purgeForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(L.Kz),e.Y36(D.p4),e.Y36(dt.O),e.Y36(Ve.q),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-purge-modal"]],decls:18,vars:6,consts:function(){let s,t,o,i,_;return s="Purge Trash",t="To purge, select\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "All" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + "\xA0 or one pool and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Purge" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".\xA0",t=e.Zx4(t),o="Pool:",i="Pool 
name...",_="All",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","purgeForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],[1,"col-form-label","mx-auto"],o,["class","form-control","type","text","placeholder",i,"formControlName","poolName",4,"ngIf"],["id","poolName","name","poolName","class","form-control","formControlName","poolName",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["type","text","placeholder",i,"formControlName","poolName",1,"form-control"],["id","poolName","name","poolName","formControlName","poolName",1,"form-control"],["value",""],_,[3,"value",4,"ngFor","ngForOf"],[3,"value"]]},template:function(t,o){1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.tHW(8,7),e._UZ(9,"kbd")(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e.YNc(14,tr,1,0,"input",11),e.YNc(15,nr,4,1,"select",12),e.qZA()(),e.TgZ(16,"div",13)(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.purge()}),e.qZA()()(),e.BQk(),e.qZA()),2&t&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.purgeForm),e.xp6(10),e.Q6J("ngIf",!o.poolPermission.read),e.xp6(1),e.Q6J("ngIf",o.poolPermission.read),e.xp6(2),e.Q6J("form",o.purgeForm)("submitText",o.actionLabels.PURGE))},directives:[f.z,a._Y,a.JL,a.sg,P.V,h.P,c.O5,$.o,a.Fj,K.b,a.JJ,a.u,a.EJ,a.YN,a.Kr,c.sg,j.p],styles:[""]}),n})();function sr(n,s){1&n&&(e.TgZ(0,"span",15),e.SDv(1,16),e.qZA())}let _r=(()=>{class n{constructor(t,o,i,_,r){this.rbdService=t,this.activeModal=o,this.actionLabels=i,this.fb=_,this.taskWrapper=r}ngOnInit(){this.imageSpec=new v.N(this.poolName,this.namespace,this.imageName).toString(),this.restoreForm=this.fb.group({name:this.imageName})}restore(){const t=this.restoreForm.getValue("name"),o=new v.N(this.poolName,this.namespace,this.imageId);this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/trash/restore",{image_id_spec:o.toString(),new_image_name:t}),call:this.rbdService.restoreTrash(o,t)}).subscribe({error:()=>{this.restoreForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close()}})}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(H),e.Y36(L.Kz),e.Y36(D.p4),e.Y36(dt.O),e.Y36(m.P))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-restore-modal"]],decls:18,vars:7,consts:function(){let s,t,o,i;return s="Restore Image",t="To restore\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "" + "\ufffd0\ufffd" + "@" + "\ufffd1\ufffd" + "" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ",\xA0 type the image's new name and click\xA0 " + "[\ufffd#9\ufffd|\ufffd#10\ufffd]" + "Restore" + "[\ufffd/#9\ufffd|\ufffd/#10\ufffd]" + ".",t=e.Zx4(t),o="New Name",i="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],s,[1,"modal-content"],["name","restoreForm","novalidate","",1,"form",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],t,[1,"form-group"],["for","name",1,"col-form-label"],o,["type","text","name","name","id","name","autocomplete","off","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],i]},template:function(t,o){if(1&t&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"p"),e.tHW(8,7),e._UZ(9,"kbd")(10,"kbd"),e.N_p(),e.qZA(),e.TgZ(11,"div",8)(12,"label",9),e.SDv(13,10),e.qZA(),e._UZ(14,"input",11),e.YNc(15,sr,2,0,"span",12),e.qZA()(),e.TgZ(16,"div",13)(17,"cd-form-button-panel",14),e.NdJ("submitActionEvent",function(){return o.restore()}),e.qZA()()(),e.BQk(),e.qZA()),2&t){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.restoreForm),e.xp6(6),e.pQV(o.imageSpec)(o.imageId),e.QtT(8),e.xp6(5),e.Q6J("ngIf",o.restoreForm.showError("name",i,"required")),e.xp6(2),e.Q6J("form",o.restoreForm)("submitText",o.actionLabels.RESTORE)}},directives:[f.z,a._Y,a.JL,a.sg,P.V,h.P,$.o,a.Fj,K.b,a.JJ,a.u,Me.U,c.O5,j.p],styles:[""]}),n})();const ar=["expiresTpl"],rr=["deleteTpl"],lr=function(n){return[n]};function cr(n,s){if(1&n){const t=e.EpF();e.TgZ(0,"button",6),e.NdJ("click",function(){return e.CHM(t),e.oxw().purgeModal()}),e._UZ(1,"i",7),e.ynx(2),e.SDv(3,8),e.BQk(),e.qZA()}if(2&n){const t=e.oxw();e.Q6J("disabled",t.disablePurgeBtn),e.xp6(1),e.Q6J("ngClass",e.VKq(2,lr,t.icons.destroy))}}function dr(n,s){1&n&&(e.ynx(0),e.SDv(1,10),e.BQk())}function pr(n,s){1&n&&(e.ynx(0),e.SDv(1,11),e.BQk())}function ur(n,s){if(1&n&&(e.YNc(0,dr,2,0,"ng-container",9),e.YNc(1,pr,2,0,"ng-container",9),e._uU(2),e.ALo(3,"cdDate")),2&n){const t=s.row,o=s.value;e.Q6J("ngIf",t.cdIsExpired),e.xp6(1),e.Q6J("ngIf",!t.cdIsExpired),e.xp6(1),e.hij(" ",e.lcZ(3,3,o),"\n")}}function mr(n,s){if(1&n&&(e.TgZ(0,"p",13)(1,"strong"),e.ynx(2),e.SDv(3,14),e.ALo(4,"cdDate"),e.BQk(),e.qZA()()),2&n){const t=e.oxw().expiresAt;e.xp6(4),e.pQV(e.lcZ(4,1,t)),e.QtT(3)}}function gr(n,s){1&n&&e.YNc(0,mr,5,3,"p",12),2&n&&e.Q6J("ngIf",!s.isExpired)}let Tr=(()=>{class n{constructor(t,o,i,_,r,l,d){this.authStorageService=t,this.rbdService=o,this.modalService=i,this.cdDatePipe=_,this.taskListService=r,this.taskWrapper=l,this.actionLabels=d,this.icons=T.P,this.executingTasks=[],this.selection=new Re.r,this.tableStatus=new se.E,this.disablePurgeBtn=!0,this.permission=this.authStorageService.getPermissions().rbdImage,this.tableActions=[{permission:"update",icon:T.P.undo,click:()=>this.restoreModal(),name:this.actionLabels.RESTORE},{permission:"delete",icon:T.P.destroy,click:()=>this.deleteModal(),name:this.actionLabels.DELETE}]}ngOnInit(){this.columns=[{name:"ID",prop:"id",flexGrow:1,cellTransformation:Le.e.executing},{name:"Name",prop:"name",flexGrow:1},{name:"Pool",prop:"pool_name",flexGrow:1},{name:"Namespace",prop:"namespace",flexGrow:1},{name:"Status",prop:"deferment_end_time",flexGrow:1,cellTemplate:this.expiresTpl},{name:"Deleted At",prop:"deletion_time",flexGrow:1,pipe:this.cdDatePipe}],this.taskListService.init(()=>this.rbdService.listTrash(),i=>this.prepareResponse(i),i=>this.images=i,()=>this.onFetchError(),i=>["rbd/trash/remove","rbd/trash/restore"].includes(i.name),(i,_)=>new v.N(i.pool_name,i.namespace,i.id).toString()===_.metadata.image_id_spec,void 0)}prepareResponse(t){let o=[];const i={};let 
_;if(t.forEach(r=>{C().isUndefined(i[r.status])&&(i[r.status]=[]),i[r.status].push(r.pool_name),o=o.concat(r.value),this.disablePurgeBtn=!o.length}),i[3]?_=3:i[1]?_=1:i[2]&&(_=2),_){const r=(i[_].length>1?"pools ":"pool ")+i[_].join();this.tableStatus=new se.E(_,r)}else this.tableStatus=new se.E;return o.forEach(r=>{r.cdIsExpired=Oe()().isAfter(r.deferment_end_time)}),o}onFetchError(){this.table.reset(),this.tableStatus=new se.E(er.T.ValueException)}updateSelection(t){this.selection=t}restoreModal(){const t={poolName:this.selection.first().pool_name,namespace:this.selection.first().namespace,imageName:this.selection.first().name,imageId:this.selection.first().id};this.modalRef=this.modalService.show(_r,t)}deleteModal(){const t=this.selection.first().pool_name,o=this.selection.first().namespace,i=this.selection.first().id,_=this.selection.first().deferment_end_time,r=Oe()().isAfter(_),l=new v.N(t,o,i);this.modalRef=this.modalService.show(pe.M,{itemDescription:"RBD",itemNames:[l],bodyTemplate:this.deleteTpl,bodyContext:{expiresAt:_,isExpired:r},submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new M.R("rbd/trash/remove",{image_id_spec:l.toString()}),call:this.rbdService.removeTrash(l,!0)})})}purgeModal(){this.modalService.show(ir)}}return n.\u0275fac=function(t){return new(t||n)(e.Y36(oe.j),e.Y36(H),e.Y36(de.Z),e.Y36(Ge.N),e.Y36(ce.j),e.Y36(m.P),e.Y36(D.p4))},n.\u0275cmp=e.Xpm({type:n,selectors:[["cd-rbd-trash-list"]],viewQuery:function(t,o){if(1&t&&(e.Gf(W.a,7),e.Gf(ar,7),e.Gf(rr,7)),2&t){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.expiresTpl=i.first),e.iGM(i=e.CRH())&&(o.deleteTpl=i.first)}},features:[e._Bn([ce.j])],decls:9,vars:8,consts:function(){let s,t,o,i;return s="Purge Trash",t="Expired at",o="Protected until",i="This image is protected until " + "\ufffd0\ufffd" + ".",[["columnMode","flex","identifier","id","forceIdentifier","true","selectionType","single",3,"data","columns","status","autoReload","fetchData","updateSelection"],[1,"table-actions","btn-toolbar"],[1,"btn-group",3,"permission","selection","tableActions"],["class","btn btn-light","type","button",3,"disabled","click",4,"ngIf"],["expiresTpl",""],["deleteTpl",""],["type","button",1,"btn","btn-light",3,"disabled","click"],["aria-hidden","true",3,"ngClass"],s,[4,"ngIf"],t,o,["class","text-danger",4,"ngIf"],[1,"text-danger"],i]},template:function(t,o){1&t&&(e._UZ(0,"cd-rbd-tabs"),e.TgZ(1,"cd-table",0),e.NdJ("fetchData",function(){return o.taskListService.fetch()})("updateSelection",function(_){return o.updateSelection(_)}),e.TgZ(2,"div",1),e._UZ(3,"cd-table-actions",2),e.YNc(4,cr,4,4,"button",3),e.qZA()(),e.YNc(5,ur,4,5,"ng-template",null,4,e.W1O),e.YNc(7,gr,1,1,"ng-template",null,5,e.W1O)),2&t&&(e.xp6(1),e.Q6J("data",o.images)("columns",o.columns)("status",o.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("ngIf",o.permission.delete))},directives:[et,W.a,Ee.K,c.O5,$.o,c.mk],pipes:[Ge.N],styles:[""]}),n})(),Gt=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[c.ez,si,a.u5,a.UX,L.Oz,L.dT,L.HK,be.b,Ae.m,g.Bz,ne.xc]]}),n})();const fr=[{path:"",redirectTo:"rbd",pathMatch:"full"},{path:"rbd",canActivate:[U.T,re.P],data:{moduleStatusGuardConfig:{uiApiPath:"block/rbd",redirectTo:"error",header:"No RBD pools available",button_name:"Create RBD 
pool",button_route:"/pool/create"},breadcrumbs:"Images"},children:[{path:"",component:Ha},{path:"namespaces",component:ja,data:{breadcrumbs:"Namespaces"}},{path:"trash",component:Tr,data:{breadcrumbs:"Trash"}},{path:"performance",component:Wa,data:{breadcrumbs:"Overall Performance"}},{path:D.MQ.CREATE,component:Be,data:{breadcrumbs:D.Qn.CREATE}},{path:`${D.MQ.EDIT}/:image_spec`,component:Be,data:{breadcrumbs:D.Qn.EDIT}},{path:`${D.MQ.CLONE}/:image_spec/:snap`,component:Be,data:{breadcrumbs:D.Qn.CLONE}},{path:`${D.MQ.COPY}/:image_spec`,component:Be,data:{breadcrumbs:D.Qn.COPY}},{path:`${D.MQ.COPY}/:image_spec/:snap`,component:Be,data:{breadcrumbs:D.Qn.COPY}}]},{path:"mirroring",component:os,canActivate:[U.T,re.P],data:{moduleStatusGuardConfig:{uiApiPath:"block/mirroring",redirectTo:"error",header:"RBD mirroring is not configured",button_name:"Configure RBD Mirroring",button_title:"This will create rbd-mirror service and a replicated RBD pool",component:"RBD Mirroring",uiConfig:!0},breadcrumbs:"Mirroring"},children:[{path:`${D.MQ.EDIT}/:pool_name`,component:_s,outlet:"modal"}]},{path:"iscsi",canActivate:[U.T],data:{breadcrumbs:"iSCSI"},children:[{path:"",redirectTo:"overview",pathMatch:"full"},{path:"overview",component:ii,data:{breadcrumbs:"Overview"}},{path:"targets",data:{breadcrumbs:"Targets"},children:[{path:"",component:qn},{path:D.MQ.CREATE,component:ft,data:{breadcrumbs:D.Qn.CREATE}},{path:`${D.MQ.EDIT}/:target_iqn`,component:ft,data:{breadcrumbs:D.Qn.EDIT}}]}]}];let Cr=(()=>{class n{}return n.\u0275fac=function(t){return new(t||n)},n.\u0275mod=e.oAB({type:n}),n.\u0275inj=e.cJS({imports:[[Gt,g.Bz.forChild(fr)]]}),n})()},60950:(ut,he,p)=>{p.d(he,{d:()=>z});var c=p(89724),a=p(1659),g=p(23815),ne=p.n(g),L=p(7357),be=p(65862),D=p(95463),U=p(30633),re=p(28211),Ae=p(34089),Ne=p(41582),C=p(11048),le=p(56310),F=p(18372),ie=p(87925),Y=p(94276);let e=(()=>{class A{constructor(m,f){this.control=m,this.formatter=f}setValue(m){const f=this.formatter.toMilliseconds(m);this.control.control.setValue(`${f} ms`)}ngOnInit(){this.setValue(this.control.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.control.value))}onUpdate(m){this.setValue(m)}}return A.\u0275fac=function(m){return new(m||A)(c.Y36(a.a5),c.Y36(re.H))},A.\u0275dir=c.lG2({type:A,selectors:[["","cdMilliseconds",""]],hostBindings:function(m,f){1&m&&c.NdJ("blur",function(h){return f.onUpdate(h.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),A})();var X=p(20044);let Fe=(()=>{class A{constructor(m,f,P,h){this.elementRef=m,this.control=f,this.dimlessBinaryPerSecondPipe=P,this.formatter=h,this.ngModelChange=new c.vpe,this.el=this.elementRef.nativeElement}ngOnInit(){this.setValue(this.el.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.el.value))}setValue(m){/^[\d.]+$/.test(m)&&(m+=this.defaultUnit||"m");const f=this.formatter.toBytes(m,0),P=this.round(f);this.el.value=this.dimlessBinaryPerSecondPipe.transform(P),null!==f?(this.ngModelChange.emit(this.el.value),this.control.control.setValue(this.el.value)):(this.ngModelChange.emit(null),this.control.control.setValue(null))}round(m){if(null!==m&&0!==m){if(!ne().isUndefined(this.minBytes)&&mthis.maxBytes)return this.maxBytes;if(!ne().isUndefined(this.roundPower)){const f=Math.round(Math.log(m)/Math.log(this.roundPower));return Math.pow(this.roundPower,f)}}return m}onBlur(m){this.setValue(m)}}return A.\u0275fac=function(m){return 
new(m||A)(c.Y36(c.SBq),c.Y36(a.a5),c.Y36(X.O),c.Y36(re.H))},A.\u0275dir=c.lG2({type:A,selectors:[["","cdDimlessBinaryPerSecond",""]],hostBindings:function(m,f){1&m&&c.NdJ("blur",function(h){return f.onBlur(h.target.value)})},inputs:{ngDataReady:"ngDataReady",minBytes:"minBytes",maxBytes:"maxBytes",roundPower:"roundPower",defaultUnit:"defaultUnit"},outputs:{ngModelChange:"ngModelChange"}}),A})(),De=(()=>{class A{constructor(m,f){this.formatter=m,this.ngControl=f}setValue(m){const f=this.formatter.toIops(m);this.ngControl.control.setValue(`${f} IOPS`)}ngOnInit(){this.setValue(this.ngControl.value),this.ngDataReady&&this.ngDataReady.subscribe(()=>this.setValue(this.ngControl.value))}onUpdate(m){this.setValue(m)}}return A.\u0275fac=function(m){return new(m||A)(c.Y36(re.H),c.Y36(a.a5))},A.\u0275dir=c.lG2({type:A,selectors:[["","cdIops",""]],hostBindings:function(m,f){1&m&&c.NdJ("blur",function(h){return f.onUpdate(h.target.value)})},inputs:{ngDataReady:"ngDataReady"}}),A})();function v(A,M){if(1&A&&(c.ynx(0),c._UZ(1,"input",17),c.BQk()),2&A){const m=c.oxw().$implicit,f=c.oxw(2);c.xp6(1),c.Q6J("id",m.name)("name",m.name)("formControlName",m.name)("ngDataReady",f.ngDataReady)}}function Pe(A,M){if(1&A&&(c.ynx(0),c._UZ(1,"input",18),c.BQk()),2&A){const m=c.oxw().$implicit,f=c.oxw(2);c.xp6(1),c.Q6J("id",m.name)("name",m.name)("formControlName",m.name)("ngDataReady",f.ngDataReady)}}function H(A,M){if(1&A&&(c.ynx(0),c._UZ(1,"input",19),c.BQk()),2&A){const m=c.oxw().$implicit,f=c.oxw(2);c.xp6(1),c.Q6J("id",m.name)("name",m.name)("formControlName",m.name)("ngDataReady",f.ngDataReady)}}function N(A,M){1&A&&(c.TgZ(0,"span",20),c.SDv(1,21),c.qZA())}const x=function(A){return{active:A}},T=function(A){return[A]};function k(A,M){if(1&A){const m=c.EpF();c.TgZ(0,"div",10)(1,"label",11),c._uU(2),c.TgZ(3,"cd-helper"),c._uU(4),c.qZA()(),c.TgZ(5,"div")(6,"div",12),c.ynx(7,13),c.YNc(8,v,2,4,"ng-container",14),c.YNc(9,Pe,2,4,"ng-container",14),c.YNc(10,H,2,4,"ng-container",14),c.BQk(),c.TgZ(11,"button",15),c.NdJ("click",function(){const h=c.CHM(m).$implicit;return c.oxw(2).reset(h.name)}),c._UZ(12,"i",7),c.qZA()(),c.YNc(13,N,2,0,"span",16),c.qZA()()}if(2&A){const m=M.$implicit,f=c.oxw().$implicit,P=c.oxw(),h=c.MAs(1);c.xp6(1),c.Q6J("for",m.name),c.xp6(1),c.Oqu(m.displayName),c.xp6(2),c.Oqu(m.description),c.xp6(1),c.Gre("cd-col-form-input ",f.heading,""),c.xp6(2),c.Q6J("ngSwitch",m.type),c.xp6(1),c.Q6J("ngSwitchCase",P.configurationType.milliseconds),c.xp6(1),c.Q6J("ngSwitchCase",P.configurationType.bps),c.xp6(1),c.Q6J("ngSwitchCase",P.configurationType.iops),c.xp6(1),c.Q6J("ngClass",c.VKq(13,x,P.isDisabled(m.name))),c.xp6(1),c.Q6J("ngClass",c.VKq(15,T,P.icons.erase)),c.xp6(1),c.Q6J("ngIf",P.form.showError("configuration."+m.name,h,"min"))}}function Z(A,M){if(1&A){const m=c.EpF();c.TgZ(0,"div",4)(1,"h4",5)(2,"span",6),c.NdJ("click",function(){const h=c.CHM(m).$implicit;return c.oxw().toggleSectionVisibility(h.class)}),c._uU(3),c._UZ(4,"i",7),c.qZA()(),c.TgZ(5,"div",8),c.YNc(6,k,14,17,"div",9),c.qZA()()}if(2&A){const m=M.$implicit,f=c.oxw();c.xp6(3),c.hij(" ",m.heading," "),c.xp6(1),c.Q6J("ngClass",f.sectionVisibility[m.class]?f.icons.minusCircle:f.icons.addCircle),c.xp6(1),c.Tol(m.class),c.Q6J("hidden",!f.sectionVisibility[m.class]),c.xp6(1),c.Q6J("ngForOf",m.options)}}let z=(()=>{class A{constructor(m,f){this.formatterService=m,this.rbdConfigurationService=f,this.initializeData=new L.t(1),this.changes=new c.vpe,this.icons=be.P,this.ngDataReady=new 
c.vpe,this.configurationType=U.r,this.sectionVisibility={}}ngOnInit(){const m=this.createConfigurationFormGroup();this.form.addControl("configuration",m),m.valueChanges.subscribe(()=>{this.changes.emit(this.getDirtyValues.bind(this))}),this.initializeData&&this.initializeData.subscribe(f=>{this.initialData=f.initialData;const P=f.sourceType;this.rbdConfigurationService.getWritableOptionFields().forEach(h=>{const $=f.initialData.filter(K=>K.name===h.name).pop();$&&$.source===P&&this.form.get(`configuration.${h.name}`).setValue($.value)}),this.ngDataReady.emit()}),this.rbdConfigurationService.getWritableSections().forEach(f=>this.sectionVisibility[f.class]=!1)}getDirtyValues(m=!1,f){if(m&&!f)throw new Error("ProgrammingError: If local values shall be included, a proper localFieldType argument has to be provided, too");const P={};return this.rbdConfigurationService.getWritableOptionFields().forEach(h=>{const $=this.form.get("configuration").get(h.name);this.initialData&&this.initialData[h.name]===$.value||($.dirty||m&&$.source===f)&&(P[h.name]=null===$.value?$.value:h.type===U.r.bps?this.formatterService.toBytes($.value):h.type===U.r.milliseconds?this.formatterService.toMilliseconds($.value):h.type===U.r.iops?this.formatterService.toIops($.value):$.value)}),P}createConfigurationFormGroup(){const m=new D.d({});return this.rbdConfigurationService.getWritableOptionFields().forEach(f=>{let P;if(f.type!==U.r.milliseconds&&f.type!==U.r.iops&&f.type!==U.r.bps)throw new Error(`Type ${f.type} is unknown, you may need to add it to RbdConfiguration class`);{let h=0;ne().forEach(this.initialData,$=>{$.name===f.name&&(h=$.value)}),P=new a.NI(h,a.kI.min(0))}m.addControl(f.name,P)}),m}reset(m){const f=this.form.get("configuration").get(m);f.disabled?(f.setValue(f.previousValue||0),f.enable(),f.previousValue||f.markAsPristine()):(f.previousValue=f.value,f.setValue(null),f.markAsDirty(),f.disable())}isDisabled(m){return this.form.get("configuration").get(m).disabled}toggleSectionVisibility(m){this.sectionVisibility[m]=!this.sectionVisibility[m]}}return A.\u0275fac=function(m){return new(m||A)(c.Y36(re.H),c.Y36(Ae.n))},A.\u0275cmp=c.Xpm({type:A,selectors:[["cd-rbd-configuration-form"]],inputs:{form:"form",initializeData:"initializeData"},outputs:{changes:"changes"},decls:5,vars:2,consts:function(){let M,m,f;return M="RBD Configuration",m="Remove the local configuration value. 
The parent configuration value will be inherited and used instead.",f="The minimum value is 0",[[3,"formGroup"],["cfgFormGroup",""],M,["class","col-12",4,"ngFor","ngForOf"],[1,"col-12"],[1,"cd-header"],[1,"collapsible",3,"click"],["aria-hidden","true",3,"ngClass"],[3,"hidden"],["class","form-group row",4,"ngFor","ngForOf"],[1,"form-group","row"],[1,"cd-col-form-label",3,"for"],[1,"input-group"],[3,"ngSwitch"],[4,"ngSwitchCase"],["type","button","data-toggle","button","title",m,1,"btn","btn-light",3,"ngClass","click"],["class","invalid-feedback",4,"ngIf"],["type","text","cdMilliseconds","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","defaultUnit","b","cdDimlessBinaryPerSecond","",1,"form-control",3,"id","name","formControlName","ngDataReady"],["type","text","cdIops","",1,"form-control",3,"id","name","formControlName","ngDataReady"],[1,"invalid-feedback"],f]},template:function(m,f){1&m&&(c.TgZ(0,"fieldset",0,1)(2,"legend"),c.SDv(3,2),c.qZA(),c.YNc(4,Z,7,7,"div",3),c.qZA()),2&m&&(c.Q6J("formGroup",f.form.get("configuration")),c.xp6(4),c.Q6J("ngForOf",f.rbdConfigurationService.sections))},directives:[a.JL,a.sg,Ne.V,C.sg,C.mk,le.P,F.S,C.RF,C.n9,ie.o,a.Fj,Y.b,e,a.JJ,a.u,Fe,De,C.O5],styles:[".collapsible[_ngcontent-%COMP%]{cursor:pointer;user-select:none}"]}),A})()},42176:(ut,he,p)=>{p.d(he,{P:()=>H});var c=p(83697),a=p(30633),g=p(89724);let ne=(()=>{class N{transform(T){return{0:"global",1:"pool",2:"image"}[T]}}return N.\u0275fac=function(T){return new(T||N)},N.\u0275pipe=g.Yjl({name:"rbdConfigurationSource",type:N,pure:!0}),N})();var L=p(28211),be=p(34089),D=p(11048),U=p(20044),re=p(48537),Ae=p(21766);const Ne=["configurationSourceTpl"],C=["configurationValueTpl"],le=["poolConfTable"];function F(N,x){1&N&&(g.TgZ(0,"span"),g.SDv(1,6),g.qZA())}function ie(N,x){1&N&&(g.TgZ(0,"strong"),g.SDv(1,7),g.qZA())}function Y(N,x){1&N&&(g.TgZ(0,"strong"),g.SDv(1,8),g.qZA())}function e(N,x){1&N&&(g.TgZ(0,"div",4),g.YNc(1,F,2,0,"span",5),g.YNc(2,ie,2,0,"strong",5),g.YNc(3,Y,2,0,"strong",5),g.qZA()),2&N&&(g.Q6J("ngSwitch",x.value),g.xp6(1),g.Q6J("ngSwitchCase","global"),g.xp6(1),g.Q6J("ngSwitchCase","image"),g.xp6(1),g.Q6J("ngSwitchCase","pool"))}function X(N,x){if(1&N&&(g.TgZ(0,"span"),g._uU(1),g.ALo(2,"dimlessBinaryPerSecond"),g.qZA()),2&N){const T=g.oxw().value;g.xp6(1),g.Oqu(g.lcZ(2,1,T))}}function Fe(N,x){if(1&N&&(g.TgZ(0,"span"),g._uU(1),g.ALo(2,"milliseconds"),g.qZA()),2&N){const T=g.oxw().value;g.xp6(1),g.Oqu(g.lcZ(2,1,T))}}function De(N,x){if(1&N&&(g.TgZ(0,"span"),g._uU(1),g.ALo(2,"iops"),g.qZA()),2&N){const T=g.oxw().value;g.xp6(1),g.Oqu(g.lcZ(2,1,T))}}function v(N,x){if(1&N&&(g.TgZ(0,"span"),g._uU(1),g.qZA()),2&N){const T=g.oxw().value;g.xp6(1),g.Oqu(T)}}function Pe(N,x){if(1&N&&(g.TgZ(0,"div",4),g.YNc(1,X,3,3,"span",5),g.YNc(2,Fe,3,3,"span",5),g.YNc(3,De,3,3,"span",5),g.YNc(4,v,2,1,"span",9),g.qZA()),2&N){const T=x.row,k=g.oxw();g.Q6J("ngSwitch",T.type),g.xp6(1),g.Q6J("ngSwitchCase",k.typeField.bps),g.xp6(1),g.Q6J("ngSwitchCase",k.typeField.milliseconds),g.xp6(1),g.Q6J("ngSwitchCase",k.typeField.iops)}}let H=(()=>{class N{constructor(T,k){this.formatterService=T,this.rbdConfigurationService=k,this.sourceField=a.h,this.typeField=a.r}ngOnInit(){this.poolConfigurationColumns=[{prop:"displayName",name:"Name"},{prop:"description",name:"Description"},{prop:"name",name:"Key"},{prop:"source",name:"Source",cellTemplate:this.configurationSourceTpl,pipe:new 
ne},{prop:"value",name:"Value",cellTemplate:this.configurationValueTpl}]}ngOnChanges(){!this.data||(this.data=this.data.filter(T=>this.rbdConfigurationService.getOptionFields().map(k=>k.name).includes(T.name)))}}return N.\u0275fac=function(T){return new(T||N)(g.Y36(L.H),g.Y36(be.n))},N.\u0275cmp=g.Xpm({type:N,selectors:[["cd-rbd-configuration-table"]],viewQuery:function(T,k){if(1&T&&(g.Gf(Ne,7),g.Gf(C,7),g.Gf(le,7)),2&T){let Z;g.iGM(Z=g.CRH())&&(k.configurationSourceTpl=Z.first),g.iGM(Z=g.CRH())&&(k.configurationValueTpl=Z.first),g.iGM(Z=g.CRH())&&(k.poolConfTable=Z.first)}},inputs:{data:"data"},features:[g.TTD],decls:6,vars:2,consts:function(){let x,T,k;return x="Global",T="Image",k="Pool",[["identifier","name",3,"data","columns"],["poolConfTable",""],["configurationSourceTpl",""],["configurationValueTpl",""],[3,"ngSwitch"],[4,"ngSwitchCase"],x,T,k,[4,"ngSwitchDefault"]]},template:function(T,k){1&T&&(g._UZ(0,"cd-table",0,1),g.YNc(2,e,4,4,"ng-template",null,2,g.W1O),g.YNc(4,Pe,5,4,"ng-template",null,3,g.W1O)),2&T&&g.Q6J("data",k.data)("columns",k.poolConfigurationColumns)},directives:[c.a,D.RF,D.n9,D.ED],pipes:[U.O,re.J,Ae.A],styles:[""]}),N})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/543.eec5c8f9f29060da.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/543.eec5c8f9f29060da.js deleted file mode 100644 index 1971c5576..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/543.eec5c8f9f29060da.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[543],{38543:(qi,we,l)=>{l.r(we),l.d(we,{RgwModule:()=>u_,RoutedRgwModule:()=>ki});var M=l(11048),a=l(1659),w=l(55860),O=l(71334),ke=l(37496),A=l(79512),$_=l(4268),I_=l(44466),v_=l(66265),F_=l(23815),R=l.n(F_),_e=l(35758),be=l(95152),qe=l(33394),Be=l(64762),He=l(19725),Fe=l(25917),Xe=l(19773),h_=l(96736),L_=l(5304),Ne=l(20523),y_=l(93523),e=l(89724);let D=class{constructor(o,_){this.http=o,this.rgwDaemonService=_,this.url="api/rgw/user"}list(){return this.enumerate().pipe((0,Xe.zg)(o=>o.length>0?(0,_e.D)(o.map(_=>this.get(_))):(0,Fe.of)([])))}enumerate(){return this.rgwDaemonService.request(o=>this.http.get(this.url,{params:o}))}enumerateEmail(){return this.rgwDaemonService.request(o=>this.http.get(`${this.url}/get_emails`,{params:o}))}get(o){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${o}`,{params:_}))}getQuota(o){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${o}/quota`,{params:_}))}create(o){return this.rgwDaemonService.request(_=>(R().keys(o).forEach(n=>{_=_.append(n,o[n])}),this.http.post(this.url,null,{params:_})))}update(o,_){return this.rgwDaemonService.request(n=>(R().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.put(`${this.url}/${o}`,null,{params:n})))}updateQuota(o,_){return this.rgwDaemonService.request(n=>(R().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.put(`${this.url}/${o}/quota`,null,{params:n})))}delete(o){return this.rgwDaemonService.request(_=>this.http.delete(`${this.url}/${o}`,{params:_}))}createSubuser(o,_){return this.rgwDaemonService.request(n=>(R().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.post(`${this.url}/${o}/subuser`,null,{params:n})))}deleteSubuser(o,_){return this.rgwDaemonService.request(n=>this.http.delete(`${this.url}/${o}/subuser/${_}`,{params:n}))}addCapability(o,_,n){return 
this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",n),this.http.post(`${this.url}/${o}/capability`,null,{params:i})))}deleteCapability(o,_,n){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",n),this.http.delete(`${this.url}/${o}/capability`,{params:i})))}addS3Key(o,_){return this.rgwDaemonService.request(n=>(n=n.append("key_type","s3"),R().keys(_).forEach(i=>{n=n.append(i,_[i])}),this.http.post(`${this.url}/${o}/key`,null,{params:n})))}deleteS3Key(o,_){return this.rgwDaemonService.request(n=>(n=(n=n.append("key_type","s3")).append("access_key",_),this.http.delete(`${this.url}/${o}/key`,{params:n})))}exists(o){return this.get(o).pipe((0,h_.h)(!0),(0,L_.K)(_=>(R().isFunction(_.preventDefault)&&_.preventDefault(),(0,Fe.of)(!1))))}emailExists(o){return o=decodeURIComponent(o),this.enumerateEmail().pipe((0,Xe.zg)(_=>{const n=R().indexOf(_,o);return(0,Fe.of)(-1!==n)}))}};D.\u0275fac=function(o){return new(o||D)(e.LFG(He.eN),e.LFG(Ne.b))},D.\u0275prov=e.Yz7({token:D,factory:D.\u0275fac,providedIn:"root"}),D=(0,Be.gn)([y_.o,(0,Be.w6)("design:paramtypes",[He.eN,Ne.b])],D);var k=l(65862),te=l(18001),Ke=l(93614),p=l(90070),he=l(97161);class We{constructor(){this.kmsProviders=["vault"],this.authMethods=["token","agent"],this.secretEngines=["kv","transit"],this.sse_s3="AES256",this.sse_kms="aws:kms"}}var K=(()=>{return(t=K||(K={})).ENABLED="Enabled",t.DISABLED="Disabled",K;var t})(),z=(()=>{return(t=z||(z={})).ENABLED="Enabled",t.SUSPENDED="Suspended",z;var t})(),Q=l(62862),ne=l(60312),Y=l(41582),H=l(56310),q=l(87925),X=l(94276),Le=l(18372),J=l(30839);function D_(t,o){1&t&&(e.TgZ(0,"option",29),e.SDv(1,30),e.qZA()),2&t&&e.Q6J("ngValue",null)}function x_(t,o){if(1&t&&(e.TgZ(0,"option",31),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function Z_(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,33),e.qZA())}function w_(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",22),e.SDv(3,23),e.qZA(),e.TgZ(4,"div",24)(5,"select",25),e.YNc(6,D_,2,1,"option",26),e.YNc(7,x_,2,2,"option",27),e.qZA(),e.YNc(8,Z_,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(6),e.Q6J("ngIf",null!==_.kmsProviders),e.xp6(1),e.Q6J("ngForOf",_.kmsProviders),e.xp6(1),e.Q6J("ngIf",_.configForm.showError("kms_provider",n,"required"))}}function k_(t,o){if(1&t&&(e.TgZ(0,"option",31),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function q_(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,37),e.qZA())}function B_(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",34),e.SDv(3,35),e.qZA(),e.TgZ(4,"div",24)(5,"select",36),e.YNc(6,k_,2,2,"option",27),e.qZA(),e.YNc(7,q_,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(6),e.Q6J("ngForOf",_.authMethods),e.xp6(1),e.Q6J("ngIf",_.configForm.showError("auth_method",n,"required"))}}function H_(t,o){if(1&t&&(e.TgZ(0,"option",31),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function X_(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,41),e.qZA())}function K_(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",38),e.SDv(3,39),e.qZA(),e.TgZ(4,"div",24)(5,"select",40),e.YNc(6,H_,2,2,"option",27),e.qZA(),e.YNc(7,X_,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(6),e.Q6J("ngForOf",_.secretEngines),e.xp6(1),e.Q6J("ngIf",_.configForm.showError("secret_engine",n,"required"))}}function z_(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,45),e.qZA())}function 
Q_(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",42),e.SDv(3,43),e.qZA(),e.TgZ(4,"div",24),e._UZ(5,"input",44),e.YNc(6,z_,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(6),e.Q6J("ngIf",_.configForm.showError("secret_path",n,"required"))}}function Y_(t,o){1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",46),e.SDv(3,47),e.qZA(),e.TgZ(4,"div",24),e._UZ(5,"input",48),e.qZA()()())}function J_(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,52),e.qZA())}function V_(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",49),e.SDv(3,50),e.qZA(),e.TgZ(4,"div",24),e._UZ(5,"input",51),e.YNc(6,J_,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(6),e.Q6J("ngIf",_.configForm.showError("address",n,"required"))}}function j_(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,57),e.qZA())}function et(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div",7)(1,"label",53)(2,"span"),e.SDv(3,54),e.qZA(),e.TgZ(4,"cd-helper"),e.SDv(5,55),e.qZA()(),e.TgZ(6,"div",24)(7,"input",56),e.NdJ("change",function(i){return e.CHM(_),e.oxw().fileUpload(i.target.files,"token")}),e.qZA(),e.YNc(8,j_,2,0,"span",28),e.qZA()()}if(2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(8),e.Q6J("ngIf",_.configForm.showError("token",n,"required"))}}function _t(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,62),e.qZA())}function tt(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",7)(2,"label",58)(3,"span"),e.SDv(4,59),e.qZA(),e.TgZ(5,"cd-helper"),e.SDv(6,60),e.qZA()(),e.TgZ(7,"div",24)(8,"input",61),e.NdJ("change",function(i){return e.CHM(_),e.oxw().fileUpload(i.target.files,"ssl_cert")}),e.qZA(),e.YNc(9,_t,2,0,"span",28),e.qZA()()()}if(2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(9),e.Q6J("ngIf",_.configForm.showError("ssl_cert",n,"required"))}}function nt(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,67),e.qZA())}function ot(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",7)(2,"label",63)(3,"span"),e.SDv(4,64),e.qZA(),e.TgZ(5,"cd-helper"),e.SDv(6,65),e.qZA()(),e.TgZ(7,"div",24)(8,"input",66),e.NdJ("change",function(i){return e.CHM(_),e.oxw().fileUpload(i.target.files,"client_cert")}),e.qZA(),e.YNc(9,nt,2,0,"span",28),e.qZA()()()}if(2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(9),e.Q6J("ngIf",_.configForm.showError("client_cert",n,"required"))}}function it(t,o){1&t&&(e.TgZ(0,"span",32),e.SDv(1,72),e.qZA())}function st(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",7)(2,"label",68)(3,"span"),e.SDv(4,69),e.qZA(),e.TgZ(5,"cd-helper"),e.SDv(6,70),e.qZA()(),e.TgZ(7,"div",24)(8,"input",71),e.NdJ("change",function(i){return e.CHM(_),e.oxw().fileUpload(i.target.files,"client_key")}),e.qZA(),e.YNc(9,it,2,0,"span",28),e.qZA()()()}if(2&t){const _=e.oxw(),n=e.MAs(5);e.xp6(9),e.Q6J("ngIf",_.configForm.showError("client_key",n,"required"))}}let at=(()=>{class t{constructor(_,n,i,s,r,d,g){this.formBuilder=_,this.activeModal=n,this.router=i,this.actionLabels=s,this.rgwBucketService=r,this.rgwEncryptionModal=d,this.notificationService=g,this.vaultAddress=/^((https?:\/\/)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+.\d+.\d+)):\d{4}$/,this.submitAction=new 
e.vpe,this.createForm()}ngOnInit(){this.kmsProviders=this.rgwEncryptionModal.kmsProviders,this.authMethods=this.rgwEncryptionModal.authMethods,this.secretEngines=this.rgwEncryptionModal.secretEngines}createForm(){this.configForm=this.formBuilder.group({address:[null,[a.kI.required,p.h.custom("vaultPattern",_=>!R().isEmpty(_)&&!this.vaultAddress.test(_))]],kms_provider:["vault",a.kI.required],encryptionType:["aws:kms",a.kI.required],auth_method:["token",a.kI.required],secret_engine:["kv",a.kI.required],secret_path:["/"],namespace:[null],token:[null,[p.h.requiredIf({auth_method:"token"})]],ssl_cert:[null,p.h.sslCert()],client_cert:[null,p.h.pemCert()],client_key:[null,p.h.sslPrivKey()],kmsEnabled:[{value:!1}],s3Enabled:[{value:!1}]})}fileUpload(_,n){const i=_[0];(new FileReader).addEventListener("load",()=>{const r=this.configForm.get(n);r.setValue(i),r.markAsDirty(),r.markAsTouched(),r.updateValueAndValidity()})}onSubmit(){const _=this.configForm.value;this.rgwBucketService.setEncryptionConfig(_.encryptionType,_.kms_provider,_.auth_method,_.secret_engine,_.secret_path,_.namespace,_.address,_.token,_.owner,_.ssl_cert,_.client_cert,_.client_key).subscribe({next:()=>{this.notificationService.show(te.k.success,"Updated RGW Encryption Configuration values")},error:n=>{this.notificationService.show(te.k.error,n),this.configForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.router.routeReuseStrategy.shouldReuseRoute=()=>!1,this.router.onSameUrlNavigation="reload",this.router.navigate([this.router.url])}})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Q.O),e.Y36(O.Kz),e.Y36(w.F0),e.Y36(A.p4),e.Y36(be.o),e.Y36(We),e.Y36(he.g))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-config-modal"]],outputs:{submitAction:"submitAction"},features:[e._Bn([We])],decls:30,vars:14,consts:function(){let o,_,n,i,s,r,d,g,E,S,G,P,b,N,m,W,U,$,I,v,F,h,L,y,f,x,T,B;return o="Update RGW Encryption Configurations",_="Encryption Type",n="SSE-S3 Encryption",i="SSE-KMS Encryption",s="Key management service provider",r="-- Select a provider --",d="This field is required.",g="Authentication Method",E="This field is required.",S="Secret Engine",G="This field is required.",P="Secret Path ",b="This field is required.",N="Namespace ",m="Vault Address ",W="This field is required.",U="Token",$=" The token authentication method expects a Vault token to be present in a plaintext file. 
",I="This field is required.",v="CA Certificate",F="The SSL certificate in PEM format.",h="This field is required.",L="Client Certificate",y="The Client certificate in PEM format.",f="This field is required.",x="Client Private Key",T="The Client Private Key in PEM format.",B="This field is required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["name","configForm",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","encryptionType",1,"cd-col-form-label","required"],_,[1,"col-md-auto","custom-checkbox","form-check-inline","ms-3"],["formControlName","encryptionType","id","s3Enabled","type","radio","name","encryptionType","value","AES256",1,"form-check-input"],["for","s3Enabled",1,"custom-check-label"],n,[1,"col-md-auto","custom-checkbox","form-check-inline"],["formControlName","encryptionType","id","kmsEnabled","name","encryptionType","value","aws:kms","type","radio",1,"form-check-input"],["for","kmsEnabled",1,"custom-check-label"],i,[4,"ngIf"],["class","form-group row",4,"ngIf"],[1,"modal-footer"],[3,"submitText","form","submitActionEvent"],["for","kms_provider",1,"cd-col-form-label","required"],s,[1,"cd-col-form-input"],["id","kms_provider","name","kms_provider","formControlName","kms_provider",1,"form-select"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],["class","invalid-feedback",4,"ngIf"],[3,"ngValue"],r,[3,"value"],[1,"invalid-feedback"],d,["for","auth_method",1,"cd-col-form-label","required"],g,["id","auth_method","name","auth_method","formControlName","auth_method",1,"form-select"],E,["for","secret_engine",1,"cd-col-form-label","required"],S,["id","secret_engine","name","secret_engine","formControlName","secret_engine",1,"form-select"],G,["for","secret_path",1,"cd-col-form-label"],P,["id","secret_path","name","secret_path","type","text","formControlName","secret_path",1,"form-control"],b,["for","namespace",1,"cd-col-form-label"],N,["id","namespace","name","namespace","type","text","formControlName","namespace",1,"form-control"],["for","address",1,"cd-col-form-label","required"],m,["id","address","name","address","formControlName","address","placeholder","http://127.0.0.1:8000",1,"form-control"],W,["for","token",1,"cd-col-form-label","required"],U,$,["type","file","formControlName","token",3,"change"],I,["for","ssl_cert",1,"cd-col-form-label"],v,F,["type","file","formControlName","ssl_cert",3,"change"],h,["for","client_cert",1,"cd-col-form-label"],L,y,["type","file","formControlName","client_cert",3,"change"],f,["for","client_key",1,"cd-col-form-label"],x,T,["type","file",3,"change"],B]},template:function(_,n){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"div",7)(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e._UZ(11,"input",11),e.TgZ(12,"label",12),e.SDv(13,13),e.qZA()(),e.TgZ(14,"div",14),e._UZ(15,"input",15),e.TgZ(16,"label",16),e.SDv(17,17),e.qZA()()(),e.YNc(18,w_,9,3,"div",18),e.YNc(19,B_,8,2,"div",18),e.YNc(20,K_,8,2,"div",18),e.YNc(21,Q_,7,1,"div",18),e.YNc(22,Y_,6,0,"div",18),e.YNc(23,V_,7,1,"div",18),e.YNc(24,et,9,1,"div",19),e.YNc(25,tt,10,1,"div",18),e.YNc(26,ot,10,1,"div",18),e.YNc(27,st,10,1,"div",18),e.qZA(),e.TgZ(28,"div",20)(29,"cd-form-button-panel",21),e.NdJ("submitActionEvent",function(){return 
n.onSubmit()}),e.qZA()()(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",n.activeModal),e.xp6(4),e.Q6J("formGroup",n.configForm),e.xp6(14),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","token"===n.configForm.getValue("auth_method")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===n.configForm.getValue("encryptionType")||"AES256"===n.configForm.getValue("encryptionType")),e.xp6(2),e.Q6J("submitText",n.actionLabels.SUBMIT)("form",n.configForm))},directives:[ne.z,a._Y,a.JL,Y.V,a.sg,H.P,q.o,a.Fj,a._,X.b,a.JJ,a.u,M.O5,a.EJ,a.YN,a.Kr,M.sg,Le.S,J.p],styles:[""]}),t})();var oe=l(63285),ze=l(63622),ie=l(82945),V=l(10545);function rt(t,o){1&t&&(e.TgZ(0,"div",9)(1,"label",42),e.SDv(2,43),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",44),e.qZA()())}function lt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,46),e.qZA())}function ct(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,47),e.qZA())}function dt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,48),e.qZA())}function ut(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,49),e.qZA())}function Rt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,50),e.qZA())}function Et(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,51),e.qZA())}function gt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,52),e.qZA())}function ft(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,53),e.qZA())}function St(t,o){1&t&&(e.TgZ(0,"option",54),e.SDv(1,55),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Tt(t,o){1&t&&(e.TgZ(0,"option",54),e.SDv(1,56),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Ct(t,o){if(1&t&&(e.TgZ(0,"option",57),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function Mt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,58),e.qZA())}function pt(t,o){1&t&&(e.TgZ(0,"option",54),e.SDv(1,60),e.qZA()),2&t&&e.Q6J("ngValue",null)}function mt(t,o){1&t&&(e.TgZ(0,"option",54),e.SDv(1,61),e.qZA()),2&t&&e.Q6J("ngValue",null)}function At(t,o){if(1&t&&(e.TgZ(0,"option",57),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_.name),e.xp6(1),e.Oqu(_.description)}}function Gt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,62),e.qZA())}function Pt(t,o){if(1&t&&(e.TgZ(0,"select",59),e.YNc(1,pt,2,1,"option",18),e.YNc(2,mt,2,1,"option",18),e.YNc(3,At,2,2,"option",19),e.qZA(),e.YNc(4,Gt,2,0,"span",14)),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(1),e.Q6J("ngIf",null===n.placementTargets),e.xp6(1),e.Q6J("ngIf",null!==n.placementTargets),e.xp6(1),e.Q6J("ngForOf",n.placementTargets),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("placement-target",_,"required"))}}function Ot(t,o){1&t&&(e.ynx(0),e._UZ(1,"input",63),e.BQk())}function bt(t,o){if(1&t){const 
_=e.EpF();e.TgZ(0,"fieldset")(1,"legend",25),e.SDv(2,64),e.qZA(),e.TgZ(3,"div",9)(4,"div",27)(5,"div",28)(6,"input",65),e.NdJ("change",function(){return e.CHM(_),e.oxw(2).setMfaDeleteValidators()}),e.qZA(),e.TgZ(7,"label",66),e.SDv(8,67),e.qZA(),e.TgZ(9,"cd-helper")(10,"span"),e.SDv(11,68),e.qZA()()()()()()}}function Nt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,77),e.qZA())}function Wt(t,o){if(1&t&&(e.TgZ(0,"div",9)(1,"label",74),e.SDv(2,75),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",76),e.YNc(5,Nt,2,0,"span",14),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.bucketForm.showError("mfa-token-serial",_,"required"))}}function Ut(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,81),e.qZA())}function $t(t,o){if(1&t&&(e.TgZ(0,"div",9)(1,"label",78),e.SDv(2,79),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",80),e.YNc(5,Ut,2,0,"span",14),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.bucketForm.showError("mfa-token-pin",_,"required"))}}function It(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend",25),e.SDv(2,69),e.qZA(),e.TgZ(3,"div",9)(4,"div",27)(5,"div",28)(6,"input",70),e.NdJ("change",function(){return e.CHM(_),e.oxw(2).setMfaDeleteValidators()}),e.qZA(),e.TgZ(7,"label",71),e.SDv(8,72),e.qZA(),e.TgZ(9,"cd-helper")(10,"span"),e.SDv(11,73),e.qZA()()()()(),e.YNc(12,Wt,6,1,"div",8),e.YNc(13,$t,6,1,"div",8),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(12),e.Q6J("ngIf",_.areMfaCredentialsRequired()),e.xp6(1),e.Q6J("ngIf",_.areMfaCredentialsRequired())}}function vt(t,o){1&t&&(e.TgZ(0,"div",9)(1,"label",82),e.SDv(2,83),e.qZA(),e.TgZ(3,"div",12)(4,"select",84)(5,"option",85),e.SDv(6,86),e.qZA(),e.TgZ(7,"option",87),e.SDv(8,88),e.qZA()()()())}function Ft(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,93),e.qZA())}function ht(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,94),e.qZA())}function Lt(t,o){if(1&t&&(e.TgZ(0,"div",9)(1,"label",89),e.ynx(2),e.SDv(3,90),e.BQk(),e.TgZ(4,"cd-helper"),e.SDv(5,91),e.qZA()(),e.TgZ(6,"div",12),e._UZ(7,"input",92),e.YNc(8,Ft,2,0,"span",14),e.YNc(9,ht,2,0,"span",14),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(8),e.Q6J("ngIf",n.bucketForm.showError("lock_retention_period_days",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("lock_retention_period_days",_,"lockDays"))}}function yt(t,o){1&t&&(e.TgZ(0,"option",54),e.SDv(1,105),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Dt(t,o){if(1&t&&(e.TgZ(0,"option",57),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function xt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,106),e.qZA())}function Zt(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",9)(2,"label",102),e.SDv(3,103),e.qZA(),e.TgZ(4,"div",12)(5,"select",104),e.YNc(6,yt,2,1,"option",18),e.YNc(7,Dt,2,2,"option",19),e.qZA(),e.YNc(8,xt,2,0,"span",14),e.qZA()()()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("autofocus",n.editing),e.xp6(1),e.Q6J("ngIf",null!==n.kmsProviders),e.xp6(1),e.Q6J("ngForOf",n.kmsProviders),e.xp6(1),e.Q6J("ngIf",n.bucketForm.showError("kms_provider",_,"required"))}}function wt(t,o){1&t&&(e.TgZ(0,"span",45),e.SDv(1,110),e.qZA())}function kt(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",9)(2,"label",107),e.SDv(3,108),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",109),e.YNc(6,wt,2,0,"span",14),e.qZA()()()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(6),e.Q6J("ngIf",n.bucketForm.showError("keyId",_,"required"))}}function 
qt(t,o){if(1&t&&(e.TgZ(0,"div")(1,"div",9)(2,"div",27)(3,"div",95),e._UZ(4,"input",96),e.TgZ(5,"label",97),e.SDv(6,98),e.qZA()()()(),e.TgZ(7,"div",9)(8,"div",27)(9,"div",95),e._UZ(10,"input",99),e.TgZ(11,"label",100),e.SDv(12,101),e.qZA()()()(),e.YNc(13,Zt,9,4,"div",24),e.YNc(14,kt,7,1,"div",24),e.qZA()),2&t){const _=e.oxw(2);e.xp6(4),e.uIk("disabled",!_.s3VaultConfig||null),e.xp6(6),e.uIk("disabled",!_.kmsVaultConfig||null),e.xp6(3),e.Q6J("ngIf","aws:kms"===_.bucketForm.getValue("encryption_type")),e.xp6(1),e.Q6J("ngIf","aws:kms"===_.bucketForm.getValue("encryption_type"))}}const Qe=function(t){return{required:t}};function Bt(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,rt,5,0,"div",8),e.TgZ(10,"div",9)(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,lt,2,0,"span",14),e.YNc(16,ct,2,0,"span",14),e.YNc(17,dt,2,0,"span",14),e.YNc(18,ut,2,0,"span",14),e.YNc(19,Rt,2,0,"span",14),e.YNc(20,Et,2,0,"span",14),e.YNc(21,gt,2,0,"span",14),e.YNc(22,ft,2,0,"span",14),e.qZA()(),e.TgZ(23,"div",9)(24,"label",15),e.SDv(25,16),e.qZA(),e.TgZ(26,"div",12)(27,"select",17),e.YNc(28,St,2,1,"option",18),e.YNc(29,Tt,2,1,"option",18),e.YNc(30,Ct,2,2,"option",19),e.qZA(),e.YNc(31,Mt,2,0,"span",14),e.qZA()(),e.TgZ(32,"div",9)(33,"label",20),e.SDv(34,21),e.qZA(),e.TgZ(35,"div",12),e.YNc(36,Pt,5,4,"ng-template",null,22,e.W1O),e.YNc(38,Ot,2,0,"ng-container",23),e.qZA()(),e.YNc(39,bt,12,0,"fieldset",24),e.YNc(40,It,14,2,"fieldset",24),e.TgZ(41,"fieldset")(42,"legend",25),e.SDv(43,26),e.qZA(),e.TgZ(44,"div",9)(45,"div",27)(46,"div",28),e._UZ(47,"input",29),e.TgZ(48,"label",30),e.SDv(49,31),e.qZA(),e.TgZ(50,"cd-helper")(51,"span"),e.SDv(52,32),e.qZA()()()()(),e.YNc(53,vt,9,0,"div",8),e.YNc(54,Lt,10,2,"div",8),e.qZA(),e.TgZ(55,"fieldset")(56,"legend",25),e.SDv(57,33),e.qZA(),e.TgZ(58,"div",9)(59,"div",27)(60,"div",28),e._UZ(61,"input",34),e.TgZ(62,"label",35),e.SDv(63,36),e.qZA(),e.TgZ(64,"cd-helper",37)(65,"span"),e.tHW(66,38),e.TgZ(67,"a",39),e.NdJ("click",function(){return e.CHM(_),e.oxw().openConfigModal()}),e.qZA(),e.N_p(),e.qZA()()()()(),e.YNc(68,qt,15,4,"div",24),e.qZA()(),e.TgZ(69,"div",40)(70,"cd-form-button-panel",41),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().submit()}),e.ALo(71,"titlecase"),e.ALo(72,"upperFirst"),e.qZA()()()()()}if(2&t){const 
_=e.MAs(2),n=e.MAs(37),i=e.oxw();e.xp6(1),e.Q6J("formGroup",i.bucketForm),e.xp6(6),e.pQV(e.lcZ(6,31,i.action))(e.lcZ(7,33,i.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",i.editing),e.xp6(2),e.Q6J("ngClass",e.VKq(39,Qe,!i.editing)),e.xp6(3),e.Q6J("readonly",i.editing)("autofocus",!i.editing),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"required")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameInvalid")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameNotAllowed")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"containsUpperCase")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"lowerCaseOrNumber")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"ipAddress")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"onlyLowerCaseAndNumbers")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"shouldBeInRange")),e.xp6(5),e.Q6J("autofocus",i.editing),e.xp6(1),e.Q6J("ngIf",null===i.owners),e.xp6(1),e.Q6J("ngIf",null!==i.owners),e.xp6(1),e.Q6J("ngForOf",i.owners),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("owner",_,"required")),e.xp6(2),e.Q6J("ngClass",e.VKq(41,Qe,!i.editing)),e.xp6(5),e.Q6J("ngIf",i.editing)("ngIfElse",n),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(13),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(7),e.uIk("disabled",!i.kmsVaultConfig&&!i.s3VaultConfig||null),e.xp6(7),e.Q6J("ngIf",i.bucketForm.getValue("encryption_enabled")),e.xp6(2),e.Q6J("form",i.bucketForm)("submitText",e.lcZ(71,35,i.action)+" "+e.lcZ(72,37,i.resource))}}let Ye=(()=>{class t extends Ke.E{constructor(_,n,i,s,r,d,g,E,S,G,P){super(),this.route=_,this.router=n,this.formBuilder=i,this.rgwBucketService=s,this.rgwSiteService=r,this.modalService=d,this.rgwUserService=g,this.notificationService=E,this.rgwEncryptionModal=S,this.actionLabels=G,this.changeDetectorRef=P,this.editing=!1,this.owners=null,this.kmsProviders=null,this.placementTargets=[],this.isVersioningAlreadyEnabled=!1,this.isMfaDeleteAlreadyEnabled=!1,this.icons=k.P,this.kmsVaultConfig=!1,this.s3VaultConfig=!1,this.editing=this.router.url.startsWith(`/rgw/bucket/${A.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="bucket",this.createForm()}get isVersioningEnabled(){return this.bucketForm.getValue("versioning")}get isMfaDeleteEnabled(){return this.bucketForm.getValue("mfa-delete")}ngAfterViewChecked(){this.changeDetectorRef.detectChanges()}createForm(){const _=this,n=p.h.custom("lockDays",()=>{if(!_.bucketForm||!R().get(_.bucketForm.getRawValue(),"lock_enabled"))return!1;const i=Number(_.bucketForm.getValue("lock_retention_period_days"));return!Number.isInteger(i)||0===i});this.bucketForm=this.formBuilder.group({id:[null],bid:[null,[a.kI.required],this.editing?[]:[p.h.bucketName(),p.h.bucketExistence(!1,this.rgwBucketService)]],owner:[null,[a.kI.required]],kms_provider:["vault"],"placement-target":[null,this.editing?[]:[a.kI.required]],versioning:[null],"mfa-delete":[null],"mfa-token-serial":[""],"mfa-token-pin":[""],lock_enabled:[{value:!1,disabled:this.editing}],encryption_enabled:[null],encryption_type:[null,[p.h.requiredIf({encryption_enabled:!0})]],keyId:[null,[p.h.requiredIf({encryption_type:"aws:kms",encryption_enabled:!0})]],lock_mode:["COMPLIANCE"],lock_retention_period_days:[0,[p.h.number(!1),n]]})}ngOnInit(){const 
_={owners:this.rgwUserService.enumerate()};this.kmsProviders=this.rgwEncryptionModal.kmsProviders,this.rgwBucketService.getEncryptionConfig().subscribe(n=>{this.kmsVaultConfig=n[0],this.s3VaultConfig=n[1],this.kmsVaultConfig&&this.s3VaultConfig?this.bucketForm.get("encryption_type").setValue(""):this.kmsVaultConfig?this.bucketForm.get("encryption_type").setValue("aws:kms"):this.s3VaultConfig?this.bucketForm.get("encryption_type").setValue("AES256"):this.bucketForm.get("encryption_type").setValue("")}),this.editing||(_.getPlacementTargets=this.rgwSiteService.get("placement-targets")),this.route.params.subscribe(n=>{if(n.hasOwnProperty("bid")){const i=decodeURIComponent(n.bid);_.getBid=this.rgwBucketService.get(i)}(0,_e.D)(_).subscribe(i=>{if(this.owners=i.owners.sort(),i.getPlacementTargets){const s=i.getPlacementTargets;this.zonegroup=s.zonegroup,R().forEach(s.placement_targets,r=>{r.description=`${r.name} (${"pool"}: ${r.data_pool})`,this.placementTargets.push(r)}),1===this.placementTargets.length&&this.bucketForm.get("placement-target").setValue(this.placementTargets[0].name)}if(i.getBid){const s=i.getBid,r=R().clone(this.bucketForm.getRawValue());let d=R().pick(s,R().keys(r));d.lock_retention_period_days=this.rgwBucketService.getLockDays(s),d["placement-target"]=s.placement_rule,d.versioning=s.versioning===z.ENABLED,d["mfa-delete"]=s.mfa_delete===K.ENABLED,d.encryption_enabled="Enabled"===s.encryption,d=R().merge(r,d),this.bucketForm.setValue(d),this.editing&&(this.isVersioningAlreadyEnabled=this.isVersioningEnabled,this.isMfaDeleteAlreadyEnabled=this.isMfaDeleteEnabled,this.setMfaDeleteValidators(),d.lock_enabled&&this.bucketForm.controls.versioning.disable())}this.loadingReady()})})}goToListView(){this.router.navigate(["/rgw/bucket"])}submit(){if(null==this.bucketForm.getValue("encryption_enabled")&&(this.bucketForm.get("encryption_enabled").setValue(!1),this.bucketForm.get("encryption_type").setValue(null)),this.bucketForm.pristine)return void this.goToListView();const _=this.bucketForm.value;if(this.editing){const n=this.getVersioningStatus(),i=this.getMfaDeleteStatus();this.rgwBucketService.update(_.bid,_.id,_.owner,n,_.encryption_enabled,_.encryption_type,_.keyId,i,_["mfa-token-serial"],_["mfa-token-pin"],_.lock_mode,_.lock_retention_period_days).subscribe(()=>{this.notificationService.show(te.k.success,"Updated Object Gateway bucket '" + _.bid + "'."),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}else this.rgwBucketService.create(_.bid,_.owner,this.zonegroup,_["placement-target"],_.lock_enabled,_.lock_mode,_.lock_retention_period_days,_.encryption_enabled,_.encryption_type,_.keyId).subscribe(()=>{this.notificationService.show(te.k.success,"Created Object Gateway bucket '" + _.bid + "'"),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}areMfaCredentialsRequired(){return this.isMfaDeleteEnabled!==this.isMfaDeleteAlreadyEnabled||this.isMfaDeleteAlreadyEnabled&&this.isVersioningEnabled!==this.isVersioningAlreadyEnabled}setMfaDeleteValidators(){const _=this.bucketForm.get("mfa-token-serial"),n=this.bucketForm.get("mfa-token-pin");this.areMfaCredentialsRequired()?(_.setValidators(a.kI.required),n.setValidators(a.kI.required)):(_.setValidators(null),n.setValidators(null)),_.updateValueAndValidity(),n.updateValueAndValidity()}getVersioningStatus(){return this.isVersioningEnabled?z.ENABLED:z.SUSPENDED}getMfaDeleteStatus(){return this.isMfaDeleteEnabled?K.ENABLED:K.DISABLED}fileUpload(_,n){const i=_[0];(new 
FileReader).addEventListener("load",()=>{const r=this.bucketForm.get(n);r.setValue(i),r.markAsDirty(),r.markAsTouched(),r.updateValueAndValidity()})}openConfigModal(){this.modalService.show(at,null,{size:"lg"}).componentInstance.configForm.get("encryptionType").setValue(this.bucketForm.getValue("encryption_type")||"AES256")}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(w.gz),e.Y36(w.F0),e.Y36(Q.O),e.Y36(be.o),e.Y36(qe.I),e.Y36(oe.Z),e.Y36(D),e.Y36(he.g),e.Y36(We),e.Y36(A.p4),e.Y36(e.sBO))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-form"]],features:[e._Bn([We]),e.qOj],decls:1,vars:1,consts:function(){let o,_,n,i,s,r,d,g,E,S,G,P,b,N,m,W,U,$,I,v,F,h,L,y,f,x,T,B,C,se,ae,re,le,ce,de,ue,Re,Ee,ge,fe,Se,Te,Ce,Me,pe,me,Ae,Ge,Pe,Oe;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",n="Name...",i="Owner",s="Placement target",r="Locking",d="Enabled",g="Enables locking for the objects in the bucket. Locking can only be enabled while creating a bucket.",E="Security",S="Encryption",G="Enables encryption for the objects in the bucket. To enable encryption on a bucket you need to set the configuration values for SSE-S3 or SSE-KMS. To set the configuration values " + "\ufffd#67\ufffd" + "Click here" + "\ufffd/#67\ufffd" + "",P="Id",b="This field is required.",N="Bucket names can only contain lowercase letters, numbers, periods and hyphens.",m="The chosen name is already in use.",W="Bucket names must not contain uppercase characters or underscores.",U="Each label must start and end with a lowercase letter or a number.",$="Bucket names cannot be formatted as IP address.",I="Bucket labels cannot be empty and can only contain lowercase letters, numbers and hyphens.",v="Bucket names must be 3 to 63 characters long.",F="Loading...",h="-- Select a user --",L="This field is required.",y="Loading...",f="-- Select a placement target --",x="This field is required.",T="Versioning",B="Enabled",C="Enables versioning for the objects in the bucket.",se="Multi-Factor Authentication",ae="Delete enabled",re="Enables MFA (multi-factor authentication) Delete, which requires additional authentication for changing the bucket versioning state.",le="Token Serial Number",ce="This field is required.",de="Token PIN",ue="This field is required.",Re="Mode",Ee="Compliance",ge="Governance",fe="Days",Se="The number of days that you want to specify for the default retention period that will be applied to new objects placed in this bucket.",Te="The entered value must be a positive integer.",Ce="Retention Days must be a positive integer.",Me="SSE-S3 Encryption",pe="Connect to an external key management service",me="KMS Provider",Ae="-- Select a provider --",Ge="This field is required.",Pe="Key Id ",Oe="This field is required.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","bucketForm","novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],o,[1,"card-body"],["class","form-group 
row",4,"ngIf"],[1,"form-group","row"],["for","bid",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","bid","name","bid","type","text","placeholder",n,"formControlName","bid",1,"form-control",3,"readonly","autofocus"],["class","invalid-feedback",4,"ngIf"],["for","owner",1,"cd-col-form-label","required"],i,["id","owner","name","owner","formControlName","owner",1,"form-select",3,"autofocus"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],["for","placement-target",1,"cd-col-form-label",3,"ngClass"],s,["placementTargetSelect",""],[4,"ngIf","ngIfElse"],[4,"ngIf"],[1,"cd-header"],r,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","lock_enabled","formControlName","lock_enabled","type","checkbox",1,"custom-control-input"],["for","lock_enabled",1,"custom-control-label"],d,g,E,["id","encryption_enabled","name","encryption_enabled","formControlName","encryption_enabled","type","checkbox",1,"form-check-input"],["for","encryption_enabled",1,"form-check-label"],S,["aria-label","toggle encryption helper"],G,["href","#/rgw/bucket/create","aria-label","click here",3,"click"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","id",1,"cd-col-form-label"],P,["id","id","name","id","type","text","formControlName","id","readonly","",1,"form-control"],[1,"invalid-feedback"],b,N,m,W,U,$,I,v,[3,"ngValue"],F,h,[3,"value"],L,["id","placement-target","name","placement-target","formControlName","placement-target",1,"form-select"],y,f,x,["id","placement-target","name","placement-target","formControlName","placement-target","type","text","readonly","",1,"form-control"],T,["type","checkbox","id","versioning","name","versioning","formControlName","versioning",1,"custom-control-input",3,"change"],["for","versioning",1,"custom-control-label"],B,C,se,["type","checkbox","id","mfa-delete","name","mfa-delete","formControlName","mfa-delete",1,"custom-control-input",3,"change"],["for","mfa-delete",1,"custom-control-label"],ae,re,["for","mfa-token-serial",1,"cd-col-form-label"],le,["type","text","id","mfa-token-serial","name","mfa-token-serial","formControlName","mfa-token-serial",1,"form-control"],ce,["for","mfa-token-pin",1,"cd-col-form-label"],de,["type","text","id","mfa-token-pin","name","mfa-token-pin","formControlName","mfa-token-pin",1,"form-control"],ue,["for","lock_mode",1,"cd-col-form-label"],Re,["formControlName","lock_mode","name","lock_mode","id","lock_mode",1,"form-select"],["value","COMPLIANCE"],Ee,["value","GOVERNANCE"],ge,["for","lock_retention_period_days",1,"cd-col-form-label"],fe,Se,["type","number","id","lock_retention_period_days","formControlName","lock_retention_period_days","min","0",1,"form-control"],Te,Ce,[1,"custom-control","custom-radio","custom-control-inline","ps-5"],["formControlName","encryption_type","id","sse_S3_enabled","type","radio","name","encryption_type","value","AES256",1,"form-check-input"],["for","sse_S3_enabled",1,"form-control-label"],Me,["formControlName","encryption_type","id","kms_enabled","name","encryption_type","value","aws:kms","type","radio",1,"form-check-input"],["for","kms_enabled",1,"form-control-label"],pe,["for","kms_provider",1,"cd-col-form-label","required"],me,["id","kms_provider","name","kms_provider","formControlName","kms_provider",1,"form-select",3,"autofocus"],Ae,Ge,["for","keyId",1,"cd-col-form-label","required"],Pe,["id","keyId","name","keyId","type","text","formControlName","keyId",1,"form-control"],Oe]},template:function(_,n){1&_&&e.YNc(0,Bt,73,43,"div",0),2&_&&e.Q6J("cdFo
rmLoading",n.loading)},directives:[ze.y,a._Y,a.JL,Y.V,a.sg,M.O5,H.P,q.o,a.Fj,X.b,a.JJ,a.u,M.mk,ie.U,a.EJ,a.YN,a.Kr,M.sg,a.Wl,Le.S,a.wV,a.qQ,a._,J.p],pipes:[M.rS,V.m],styles:[""]}),t})();var Je=l(70882),ye=l(68136),Ve=l(30982),j=l(83697),De=l(68774),je=l(47557),e_=l(66369),ee=l(51847),Ue=l(47640),__=l(94928),Ht=l(96102),t_=l(68962);function Xt(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,25),e.qZA())}function Kt(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimless"),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.selection.bucket_quota.max_size)," ")}}function zt(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,26),e.qZA())}function Qt(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",_.selection.bucket_quota.max_objects," ")}}function Yt(t,o){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,21),e.qZA(),e.TgZ(3,"table",1)(4,"tbody")(5,"tr")(6,"td",2),e.SDv(7,22),e.qZA(),e.TgZ(8,"td",4),e._uU(9),e.ALo(10,"booleanText"),e.qZA()(),e.TgZ(11,"tr")(12,"td",5),e.SDv(13,23),e.qZA(),e.YNc(14,Xt,2,0,"td",0),e.YNc(15,Kt,3,3,"td",0),e.qZA(),e.TgZ(16,"tr")(17,"td",5),e.SDv(18,24),e.qZA(),e.YNc(19,zt,2,0,"td",0),e.YNc(20,Qt,2,1,"td",0),e.qZA()()()()),2&t){const _=e.oxw(2);e.xp6(9),e.Oqu(e.lcZ(10,5,_.selection.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",_.selection.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",_.selection.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_objects>-1)}}function Jt(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"tr")(2,"td",5),e.SDv(3,27),e.qZA(),e.TgZ(4,"td"),e._uU(5),e.qZA()(),e.TgZ(6,"tr")(7,"td",5),e.SDv(8,28),e.qZA(),e.TgZ(9,"td"),e._uU(10),e.qZA()(),e.BQk()),2&t){const _=e.oxw(2);e.xp6(5),e.Oqu(_.selection.lock_mode),e.xp6(5),e.Oqu(_.selection.lock_retention_period_days)}}function Vt(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"table",1)(2,"tbody")(3,"tr")(4,"td",2),e.SDv(5,3),e.qZA(),e.TgZ(6,"td",4),e._uU(7),e.qZA()(),e.TgZ(8,"tr")(9,"td",5),e.SDv(10,6),e.qZA(),e.TgZ(11,"td"),e._uU(12),e.qZA()(),e.TgZ(13,"tr")(14,"td",5),e.SDv(15,7),e.qZA(),e.TgZ(16,"td"),e._uU(17),e.qZA()(),e.TgZ(18,"tr")(19,"td",5),e.SDv(20,8),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.qZA()(),e.TgZ(23,"tr")(24,"td",5),e.SDv(25,9),e.qZA(),e.TgZ(26,"td"),e._uU(27),e.qZA()(),e.TgZ(28,"tr")(29,"td",5),e.SDv(30,10),e.qZA(),e.TgZ(31,"td"),e._uU(32),e.qZA()(),e.TgZ(33,"tr")(34,"td",5),e.SDv(35,11),e.qZA(),e.TgZ(36,"td"),e._uU(37),e.qZA()(),e.TgZ(38,"tr")(39,"td",5),e.SDv(40,12),e.qZA(),e.TgZ(41,"td"),e._uU(42),e.qZA()(),e.TgZ(43,"tr")(44,"td",5),e.SDv(45,13),e.qZA(),e.TgZ(46,"td"),e._uU(47),e.qZA()(),e.TgZ(48,"tr")(49,"td",5),e.SDv(50,14),e.qZA(),e.TgZ(51,"td"),e._uU(52),e.ALo(53,"cdDate"),e.qZA()(),e.TgZ(54,"tr")(55,"td",5),e.SDv(56,15),e.qZA(),e.TgZ(57,"td"),e._uU(58),e.qZA()(),e.TgZ(59,"tr")(60,"td",5),e.SDv(61,16),e.qZA(),e.TgZ(62,"td"),e._uU(63),e.qZA()(),e.TgZ(64,"tr")(65,"td",5),e.SDv(66,17),e.qZA(),e.TgZ(67,"td"),e._uU(68),e.qZA()(),e.TgZ(69,"tr")(70,"td",5),e.SDv(71,18),e.qZA(),e.TgZ(72,"td"),e._uU(73),e.qZA()()()(),e.YNc(74,Yt,21,7,"div",0),e.TgZ(75,"legend"),e.SDv(76,19),e.qZA(),e.TgZ(77,"table",1)(78,"tbody")(79,"tr")(80,"td",2),e.SDv(81,20),e.qZA(),e.TgZ(82,"td",4),e._uU(83),e.ALo(84,"booleanText"),e.qZA()(),e.YNc(85,Jt,11,2,"ng-container",0),e.qZA()(),e.BQk()),2&t){const 
_=e.oxw();e.xp6(7),e.Oqu(_.selection.bid),e.xp6(5),e.Oqu(_.selection.id),e.xp6(5),e.Oqu(_.selection.owner),e.xp6(5),e.Oqu(_.selection.index_type),e.xp6(5),e.Oqu(_.selection.placement_rule),e.xp6(5),e.Oqu(_.selection.marker),e.xp6(5),e.Oqu(_.selection.max_marker),e.xp6(5),e.Oqu(_.selection.ver),e.xp6(5),e.Oqu(_.selection.master_ver),e.xp6(5),e.Oqu(e.lcZ(53,17,_.selection.mtime)),e.xp6(6),e.Oqu(_.selection.zonegroup),e.xp6(5),e.Oqu(_.selection.versioning),e.xp6(5),e.Oqu(_.selection.encryption),e.xp6(5),e.Oqu(_.selection.mfa_delete),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota),e.xp6(9),e.Oqu(e.lcZ(84,19,_.selection.lock_enabled)),e.xp6(2),e.Q6J("ngIf",_.selection.lock_enabled)}}let jt=(()=>{class t{constructor(_){this.rgwBucketService=_}ngOnChanges(){this.selection&&this.rgwBucketService.get(this.selection.bid).subscribe(_=>{_.lock_retention_period_days=this.rgwBucketService.getLockDays(_),this.selection=_})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(be.o))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i,s,r,d,g,E,S,G,P,b,N,m,W,U,$,I,v,F,h,L,y;return o="Name",_="ID",n="Owner",i="Index type",s="Placement rule",r="Marker",d="Maximum marker",g="Version",E="Master version",S="Modification time",G="Zonegroup",P="Versioning",b="Encryption",N="MFA Delete",m="Locking",W="Enabled",U="Bucket quota",$="Enabled",I="Maximum size",v="Maximum objects",F="Unlimited",h="Unlimited",L="Mode",y="Days",[[4,"ngIf"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],o,[1,"w-75"],[1,"bold"],_,n,i,s,r,d,g,E,S,G,P,b,N,m,W,U,$,I,v,F,h,L,y]},template:function(_,n){1&_&&e.YNc(0,Vt,86,21,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[M.O5],pipes:[Ht.N,t_.T,e_.n],styles:["table[_ngcontent-%COMP%]{table-layout:fixed}table[_ngcontent-%COMP%] td[_ngcontent-%COMP%]{word-wrap:break-word}"]}),t})();var n_=l(60251);const en=["bucketSizeTpl"],_n=["bucketObjectTpl"];function tn(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_size)("used",_.bucket_size)}}function nn(t,o){1&t&&e.SDv(0,9)}function on(t,o){if(1&t&&(e.YNc(0,tn,1,2,"cd-usage-bar",6),e.YNc(1,nn,1,0,"ng-template",null,7,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_size>0&&_.bucket_quota.enabled)("ngIfElse",n)}}function sn(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_objects)("used",_.num_objects)("isBinary",!1)}}function an(t,o){1&t&&e.SDv(0,13)}function rn(t,o){if(1&t&&(e.YNc(0,sn,1,3,"cd-usage-bar",10),e.YNc(1,an,1,0,"ng-template",null,11,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_objects>0&&_.bucket_quota.enabled)("ngIfElse",n)}}let cn=(()=>{class t extends ye.o{constructor(_,n,i,s,r,d,g,E){super(E),this.authStorageService=_,this.dimlessBinaryPipe=n,this.dimlessPipe=i,this.rgwBucketService=s,this.modalService=r,this.urlBuilder=d,this.actionLabels=g,this.ngZone=E,this.columns=[],this.buckets=[],this.selection=new De.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Name",prop:"bid",flexGrow:2},{name:"Owner",prop:"owner",flexGrow:2.5},{name:"Used Capacity",prop:"bucket_size",flexGrow:.6,pipe:this.dimlessBinaryPipe},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.bucketSizeTpl,flexGrow:.8},{name:"Objects",prop:"num_objects",flexGrow:.6,pipe:this.dimlessPipe},{name:"Object Limit 
%",prop:"object_usage",cellTemplate:this.bucketObjectTpl,flexGrow:.8}];const _=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().bid)}`;this.tableActions=[{permission:"create",icon:k.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:r=>!r.hasSelection},{permission:"update",icon:k.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:k.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:r=>r.hasMultiSelection}],this.setTableRefreshTimeout()}transformBucketData(){R().forEach(this.buckets,_=>{const n=_.bucket_quota.max_size,i=_.bucket_quota.max_objects;_.bucket_size=0,_.num_objects=0,R().isEmpty(_.usage)||(_.bucket_size=_.usage["rgw.main"].size_actual,_.num_objects=_.usage["rgw.main"].num_objects),_.size_usage=n>0?_.bucket_size/n:void 0,_.object_usage=i>0?_.num_objects/i:void 0})}getBucketList(_){this.setTableRefreshTimeout(),this.rgwBucketService.list(!0).subscribe(n=>{this.buckets=n,this.transformBucketData()},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(Ve.M,{itemDescription:this.selection.hasSingleSelection?"bucket":"buckets",itemNames:this.selection.selected.map(_=>_.bid),submitActionObservable:()=>new Je.y(_=>{(0,_e.D)(this.selection.selected.map(n=>this.rgwBucketService.delete(n.bid))).subscribe({error:n=>{_.error(n),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ue.j),e.Y36(je.$),e.Y36(e_.n),e.Y36(be.o),e.Y36(oe.Z),e.Y36(ee.F),e.Y36(A.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-list"]],viewQuery:function(_,n){if(1&_&&(e.Gf(j.a,7),e.Gf(en,7),e.Gf(_n,7)),2&_){let i;e.iGM(i=e.CRH())&&(n.table=i.first),e.iGM(i=e.CRH())&&(n.bucketSizeTpl=i.first),e.iGM(i=e.CRH())&&(n.bucketObjectTpl=i.first)}},features:[e._Bn([{provide:ee.F,useValue:new ee.F("rgw/bucket")}]),e.qOj],decls:8,vars:9,consts:function(){let o,_;return o="No Limit",_="No Limit",[["columnMode","flex","selectionType","multiClick","identifier","bid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["bucketSizeTpl",""],["bucketObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],o,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,n){1&_&&(e.TgZ(0,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return n.setExpandedRow(s)})("updateSelection",function(s){return n.updateSelection(s)})("fetchData",function(s){return n.getBucketList(s)}),e._UZ(2,"cd-table-actions",2)(3,"cd-rgw-bucket-details",3),e.qZA(),e.YNc(4,on,3,2,"ng-template",null,4,e.W1O),e.YNc(6,rn,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.Q6J("autoReload",!1)("data",n.buckets)("columns",n.columns)("hasDetails",!0)("status",n.tableStatus),e.xp6(2),e.Q6J("permission",n.permission)("selection",n.selection)("tableActions",n.tableActions),e.xp6(1),e.Q6J("selection",n.expandedRow))},directives:[j.a,__.K,jt,M.O5,n_.O],styles:[""]}),t})();var dn=l(58111),un=l(59376),Rn=l(61350),En=l(60351),o_=l(76317);function gn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table-key-value",11),e.NdJ("fetchData",function(){return e.CHM(_),e.oxw(2).getMetaData()}),e.qZA()}if(2&t){const 
_=e.oxw(2);e.Q6J("data",_.metadata)}}function fn(t,o){if(1&t&&e._UZ(0,"cd-table-performance-counter",12),2&t){const _=e.oxw(2);e.Q6J("serviceId",_.serviceMapId)}}function Sn(t,o){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.Q6J("grafanaPath","rgw-instance-detail?var-rgw_servers=rgw."+_.serviceId)("type","metrics")}}function Tn(t,o){1&t&&(e.ynx(0,13),e.TgZ(1,"a",4),e.SDv(2,14),e.qZA(),e.YNc(3,Sn,1,2,"ng-template",6),e.BQk())}function Cn(t,o){if(1&t&&(e.ynx(0),e.TgZ(1,"nav",1,2),e.ynx(3,3),e.TgZ(4,"a",4),e.SDv(5,5),e.qZA(),e.YNc(6,gn,1,1,"ng-template",6),e.BQk(),e.ynx(7,7),e.TgZ(8,"a",4),e.SDv(9,8),e.qZA(),e.YNc(10,fn,1,1,"ng-template",6),e.BQk(),e.YNc(11,Tn,4,0,"ng-container",9),e.qZA(),e._UZ(12,"div",10),e.BQk()),2&t){const _=e.MAs(2),n=e.oxw();e.xp6(11),e.Q6J("ngIf",n.grafanaPermission.read),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let Mn=(()=>{class t{constructor(_,n){this.rgwDaemonService=_,this.authStorageService=n,this.serviceId="",this.serviceMapId="",this.grafanaPermission=this.authStorageService.getPermissions().grafana}ngOnChanges(){this.selection&&(this.serviceId=this.selection.id,this.serviceMapId=this.selection.service_map_id)}getMetaData(){R().isEmpty(this.serviceId)||this.rgwDaemonService.get(this.serviceId).subscribe(_=>{this.metadata=_.rgw_metadata})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ne.b),e.Y36(Ue.j))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i;return o="Details",_="Performance Counters",n="Performance Details",i="RGW instance details",[[4,"ngIf"],["ngbNav","","cdStatefulTab","rgw-daemon-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","performance-counters"],_,["ngbNavItem","performance-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"data","fetchData"],["serviceType","rgw",3,"serviceId"],["ngbNavItem","performance-details"],n,["title",i,"uid","x5ARzZtmk","grafanaStyle","one",3,"grafanaPath","type"]]},template:function(_,n){1&_&&e.YNc(0,Cn,13,2,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[M.O5,O.Pz,un.m,O.nv,O.Vx,O.uN,Rn.b,En.p,o_.F,O.tO],styles:[""]}),t})();function pn(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",8),e.NdJ("setExpandedRow",function(i){return e.CHM(_),e.oxw().setExpandedRow(i)})("fetchData",function(i){return e.CHM(_),e.oxw().getDaemonList(i)}),e._UZ(1,"cd-rgw-daemon-details",9),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.daemons)("columns",_.columns)("hasDetails",!0),e.xp6(1),e.Q6J("selection",_.expandedRow)}}function mn(t,o){1&t&&e._UZ(0,"cd-grafana",11),2&t&&e.Q6J("grafanaPath","rgw-overview?")("type","metrics")}function An(t,o){1&t&&(e.ynx(0,2),e.TgZ(1,"a",3),e.SDv(2,10),e.qZA(),e.YNc(3,mn,1,2,"ng-template",5),e.BQk())}function Gn(t,o){1&t&&e._UZ(0,"cd-grafana",13),2&t&&e.Q6J("grafanaPath","radosgw-sync-overview?")("type","metrics")}function Pn(t,o){1&t&&(e.ynx(0,2),e.TgZ(1,"a",3),e.SDv(2,12),e.qZA(),e.YNc(3,Gn,1,2,"ng-template",5),e.BQk())}let On=(()=>{class t extends 
ye.o{constructor(_,n,i,s){super(),this.rgwDaemonService=_,this.authStorageService=n,this.cephShortVersionPipe=i,this.rgwSiteService=s,this.columns=[],this.daemons=[],this.updateDaemons=r=>{this.daemons=r}}ngOnInit(){this.grafanaPermission=this.authStorageService.getPermissions().grafana,this.columns=[{name:"ID",prop:"id",flexGrow:2},{name:"Hostname",prop:"server_hostname",flexGrow:2},{name:"Port",prop:"port",flexGrow:1},{name:"Realm",prop:"realm_name",flexGrow:2},{name:"Zone Group",prop:"zonegroup_name",flexGrow:2},{name:"Zone",prop:"zone_name",flexGrow:2},{name:"Version",prop:"version",flexGrow:1,pipe:this.cephShortVersionPipe}],this.rgwSiteService.get("realms").subscribe(_=>this.isMultiSite=_.length>0)}getDaemonList(_){this.rgwDaemonService.list().subscribe(this.updateDaemons,()=>{_.error()})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ne.b),e.Y36(Ue.j),e.Y36(dn.F),e.Y36(qe.I))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-list"]],features:[e.qOj],decls:9,vars:3,consts:function(){let o,_,n,i,s;return o="Gateways List",_="Overall Performance",n="RGW overview",i="Sync Performance",s="Radosgw sync overview",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],o,["ngbNavContent",""],["ngbNavItem","",4,"ngIf"],[3,"ngbNavOutlet"],["columnMode","flex",3,"data","columns","hasDetails","setExpandedRow","fetchData"],["cdTableDetail","",3,"selection"],_,["title",n,"uid","WAkugZpiz","grafanaStyle","two",3,"grafanaPath","type"],i,["title",s,"uid","rgw-sync-overview","grafanaStyle","two",3,"grafanaPath","type"]]},template:function(_,n){if(1&_&&(e.TgZ(0,"nav",0,1),e.ynx(2,2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,pn,2,4,"ng-template",5),e.BQk(),e.YNc(6,An,4,0,"ng-container",6),e.YNc(7,Pn,4,0,"ng-container",6),e.qZA(),e._UZ(8,"div",7)),2&_){const i=e.MAs(1);e.xp6(6),e.Q6J("ngIf",n.grafanaPermission.read),e.xp6(1),e.Q6J("ngIf",n.grafanaPermission.read&&n.isMultiSite),e.xp6(1),e.Q6J("ngbNavOutlet",i)}},directives:[O.Pz,O.nv,O.Vx,O.uN,j.a,Mn,M.O5,o_.F,O.tO],styles:[""]}),t})();var bn=l(6481),xe=l(28211),$e=(()=>{return(t=$e||($e={})).USERS="users",t.BUCKETS="buckets",t.METADATA="metadata",t.USAGE="usage",t.ZONE="zone",$e;var t})();let i_=(()=>{class t{static getAll(){return Object.values(t.capabilities)}}return t.capabilities=$e,t})();function Nn(t,o){1&t&&e._UZ(0,"input",22),2&t&&e.Q6J("readonly",!0)}function Wn(t,o){1&t&&(e.TgZ(0,"option",17),e.SDv(1,25),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Un(t,o){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function $n(t,o){if(1&t&&(e.TgZ(0,"select",23),e.YNc(1,Wn,2,1,"option",24),e.YNc(2,Un,2,2,"option",19),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.types),e.xp6(1),e.Q6J("ngForOf",_.types)}}function In(t,o){1&t&&(e.TgZ(0,"span",27),e.SDv(1,28),e.qZA())}function vn(t,o){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Fn(t,o){1&t&&(e.TgZ(0,"span",27),e.SDv(1,29),e.qZA())}const hn=function(t){return{required:t}},Ln=function(){return["read","write","*"]};let yn=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.activeModal=n,this.actionLabels=i,this.submitAction=new 
e.vpe,this.editing=!0,this.types=[],this.resource="capability",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({type:[null,[a.kI.required]],perm:[null,[a.kI.required]]})}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.ADD}setValues(_,n){this.formGroup.setValue({type:_,perm:n})}setCapabilities(_){const n=[];_.forEach(i=>{n.push(i.type)}),this.types=[],i_.getAll().forEach(i=>{-1===R().indexOf(n,i)&&this.types.push(i)})}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Q.O),e.Y36(O.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-capability-modal"]],outputs:{submitAction:"submitAction"},decls:29,vars:24,consts:function(){let o,_,n,i,s,r,d;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Type",n="Permission",i="-- Select a permission --",s="-- Select a type --",r="This field is required.",d="This field is required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","type",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","type","class","form-control","type","text","formControlName","type",3,"readonly",4,"ngIf"],["id","type","class","form-select","formControlName","type","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],n,["id","perm","formControlName","perm",1,"form-select"],[3,"ngValue"],i,[3,"value",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["id","type","type","text","formControlName","type",1,"form-control",3,"readonly"],["id","type","formControlName","type","autofocus","",1,"form-select"],[3,"ngValue",4,"ngIf"],s,[3,"value"],[1,"invalid-feedback"],r,d]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,Nn,1,1,"input",11),e.YNc(14,$n,3,2,"select",12),e.YNc(15,In,2,0,"span",13),e.qZA()(),e.TgZ(16,"div",7)(17,"label",14),e.SDv(18,15),e.qZA(),e.TgZ(19,"div",10)(20,"select",16)(21,"option",17),e.SDv(22,18),e.qZA(),e.YNc(23,vn,2,2,"option",19),e.qZA(),e.YNc(24,Fn,2,0,"span",13),e.qZA()()(),e.TgZ(25,"div",20)(26,"cd-form-button-panel",21),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(27,"titlecase"),e.ALo(28,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,13,n.action))(e.lcZ(4,15,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(21,hn,!n.editing)),e.xp6(3),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",!n.editing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("type",i,"required")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(23,Ln)),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("perm",i,"required")),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(27,17,n.action)+" "+e.lcZ(28,19,n.resource))}},directives:[ne.z,a._Y,a.JL,Y.V,a.sg,H.P,M.mk,M.O5,q.o,a.Fj,X.b,a.JJ,a.u,a.EJ,ie.U,a.YN,a.Kr,M.sg,J.p],pipes:[M.rS,V.m],styles:[""]}),t})();var Ie=l(4416),ve=l(58039);function Dn(t,o){1&t&&e._UZ(0,"input",17),2&t&&e.Q6J("readonly",!0)}function xn(t,o){1&t&&(e.TgZ(0,"option",21),e.SDv(1,22),e.qZA()),2&t&&e.Q6J("ngValue",null)}function 
Zn(t,o){if(1&t&&(e.TgZ(0,"option",23),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function wn(t,o){if(1&t&&(e.TgZ(0,"select",18),e.YNc(1,xn,2,1,"option",19),e.YNc(2,Zn,2,2,"option",20),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.userCandidates),e.xp6(1),e.Q6J("ngForOf",_.userCandidates)}}function kn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function qn(t,o){1&t&&(e.TgZ(0,"div",7)(1,"div",26)(2,"div",27),e._UZ(3,"input",28),e.TgZ(4,"label",29),e.SDv(5,30),e.qZA()()()())}function Bn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,37),e.qZA())}const Ze=function(t){return{required:t}};function Hn(t,o){if(1&t&&(e.TgZ(0,"div",7)(1,"label",31),e.SDv(2,32),e.qZA(),e.TgZ(3,"div",10)(4,"div",33),e._UZ(5,"input",34)(6,"button",35)(7,"cd-copy-2-clipboard-button",36),e.qZA(),e.YNc(8,Bn,2,0,"span",13),e.qZA()()),2&t){const _=e.oxw(),n=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ze,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(3),e.Q6J("ngIf",_.formGroup.showError("access_key",n,"required"))}}function Xn(t,o){1&t&&(e.TgZ(0,"span",24),e.SDv(1,43),e.qZA())}function Kn(t,o){if(1&t&&(e.TgZ(0,"div",7)(1,"label",38),e.SDv(2,39),e.qZA(),e.TgZ(3,"div",10)(4,"div",33),e._UZ(5,"input",40)(6,"button",41)(7,"cd-copy-2-clipboard-button",42),e.qZA(),e.YNc(8,Xn,2,0,"span",13),e.qZA()()),2&t){const _=e.oxw(),n=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ze,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(3),e.Q6J("ngIf",_.formGroup.showError("secret_key",n,"required"))}}let s_=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.activeModal=n,this.actionLabels=i,this.submitAction=new e.vpe,this.viewing=!0,this.userCandidates=[],this.resource="S3 Key",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({user:[null,[a.kI.required]],generate_key:[!0],access_key:[null,[p.h.requiredIf({generate_key:!1})]],secret_key:[null,[p.h.requiredIf({generate_key:!1})]]})}setViewing(_=!0){this.viewing=_,this.action=this.viewing?this.actionLabels.SHOW:this.actionLabels.CREATE}setValues(_,n,i){this.formGroup.setValue({user:_,generate_key:R().isEmpty(n),access_key:n,secret_key:i})}setUserCandidates(_){this.userCandidates=_}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Q.O),e.Y36(O.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-s3-key-modal"]],outputs:{submitAction:"submitAction"},decls:23,vars:24,consts:function(){let o,_,n,i,s,r,d,g,E;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="-- Select a username --",i="This field is required.",s="Auto-generate key",r="Access key",d="This field is required.",g="Secret key",E="This field is required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user","class","form-control","type","text","formControlName","user",3,"readonly",4,"ngIf"],["id","user","class","form-control","formControlName","user","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["class","form-group 
row",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","showSubmit","submitActionEvent"],["id","user","type","text","formControlName","user",1,"form-control",3,"readonly"],["id","user","formControlName","user","autofocus","",1,"form-control"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],n,[3,"value"],[1,"invalid-feedback"],i,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],s,["for","access_key",1,"cd-col-form-label",3,"ngClass"],r,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control",3,"readonly"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],d,["for","secret_key",1,"cd-col-form-label",3,"ngClass"],g,["id","secret_key","type","password","formControlName","secret_key",1,"form-control",3,"readonly"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],E]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,Dn,1,1,"input",11),e.YNc(14,wn,3,2,"select",12),e.YNc(15,kn,2,0,"span",13),e.qZA()(),e.YNc(16,qn,6,0,"div",14),e.YNc(17,Hn,9,5,"div",14),e.YNc(18,Kn,9,5,"div",14),e.qZA(),e.TgZ(19,"div",15)(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(21,"titlecase"),e.ALo(22,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,14,n.action))(e.lcZ(4,16,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(22,Ze,!n.viewing)),e.xp6(3),e.Q6J("ngIf",n.viewing),e.xp6(1),e.Q6J("ngIf",!n.viewing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",!n.viewing),e.xp6(1),e.Q6J("ngIf",!n.formGroup.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!n.formGroup.getValue("generate_key")),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(21,18,n.action)+" "+e.lcZ(22,20,n.resource))("showSubmit",!n.viewing)}},directives:[ne.z,a._Y,a.JL,Y.V,a.sg,H.P,M.mk,M.O5,q.o,a.Fj,X.b,a.JJ,a.u,a.EJ,ie.U,a.YN,a.Kr,M.sg,a.Wl,Ie.C,ve.s,J.p],pipes:[M.rS,V.m],styles:[""]}),t})();class zn{}function Qn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function Yn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function Jn(t,o){if(1&t&&(e.TgZ(0,"option",32),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Vn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,33),e.qZA())}function jn(t,o){1&t&&(e.TgZ(0,"span",29),e.SDv(1,47),e.qZA())}function eo(t,o){if(1&t&&(e.TgZ(0,"div",7)(1,"label",41),e.SDv(2,42),e.qZA(),e.TgZ(3,"div",10)(4,"div",43),e._UZ(5,"input",44)(6,"button",45)(7,"cd-copy-2-clipboard-button",46),e.qZA(),e.YNc(8,jn,2,0,"span",15),e.qZA()()),2&t){const _=e.oxw(2),n=e.MAs(7);e.xp6(8),e.Q6J("ngIf",_.formGroup.showError("secret_key",n,"required"))}}function _o(t,o){if(1&t&&(e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,34),e.qZA(),e.TgZ(3,"div",7)(4,"div",35)(5,"div",36),e._UZ(6,"input",37),e.TgZ(7,"label",38),e.SDv(8,39),e.qZA()()()(),e.YNc(9,eo,9,1,"div",40),e.qZA()),2&t){const _=e.oxw();e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.formGroup.getValue("generate_secret"))}}const 
to=function(t){return{required:t}},no=function(){return["read","write"]};let oo=(()=>{class t{constructor(_,n,i){this.formBuilder=_,this.bsModalRef=n,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.subusers=[],this.resource="Subuser",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({uid:[null],subuid:[null,[a.kI.required,this.subuserValidator()]],perm:[null,[a.kI.required]],generate_secret:[!0],secret_key:[null,[p.h.requiredIf({generate_secret:!1})]]})}subuserValidator(){const _=this;return n=>_.editing||(0,p.P)(n.value)?null:_.subusers.some(s=>R().isEqual(_.getSubuserName(s.id),n.value))?{subuserIdExists:!0}:null}getSubuserName(_){if(R().isEmpty(_))return _;const n=_.match(/([^:]+)(:(.+))?/);return R().isUndefined(n[3])?n[1]:n[3]}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE}setValues(_,n="",i=""){this.formGroup.setValue({uid:_,subuid:this.getSubuserName(n),perm:i,generate_secret:!0,secret_key:null})}setSubusers(_){this.subusers=_}onSubmit(){const _=this.formGroup.value,n=new zn;n.id=`${_.uid}:${_.subuid}`,n.permissions=_.perm,n.generate_secret=_.generate_secret,n.secret_key=_.secret_key,this.submitAction.emit(n),this.bsModalRef.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Q.O),e.Y36(O.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-subuser-modal"]],outputs:{submitAction:"submitAction"},decls:39,vars:26,consts:function(){let o,_,n,i,s,r,d,g,E,S,G,P,b,N;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="Subuser",i="Permission",s="-- Select a permission --",r="read, write",d="full",g="This field is required.",E="The chosen subuser ID is already in use.",S="This field is required.",G="Swift key",P="Auto-generate secret",b="Secret key",N="This field is required.",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","uid",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","uid","type","text","formControlName","uid",1,"form-control",3,"readonly"],["for","subuid",1,"cd-col-form-label",3,"ngClass"],n,["id","subuid","type","text","formControlName","subuid","autofocus","",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],i,["id","perm","formControlName","perm",1,"form-select"],[3,"ngValue"],s,[3,"value",4,"ngFor","ngForOf"],["value","read-write"],r,["value","full-control"],d,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],g,E,[3,"value"],S,G,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_secret","type","checkbox","formControlName","generate_secret",1,"custom-control-input"],["for","generate_secret",1,"custom-control-label"],P,["class","form-group 
row",4,"ngIf"],["for","secret_key",1,"cd-col-form-label","required"],b,[1,"input-group"],["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],N]},template:function(_,n){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.qZA()(),e.TgZ(14,"div",7)(15,"label",12),e.SDv(16,13),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",14),e.YNc(19,Qn,2,0,"span",15),e.YNc(20,Yn,2,0,"span",15),e.qZA()(),e.TgZ(21,"div",7)(22,"label",16),e.SDv(23,17),e.qZA(),e.TgZ(24,"div",10)(25,"select",18)(26,"option",19),e.SDv(27,20),e.qZA(),e.YNc(28,Jn,2,2,"option",21),e.TgZ(29,"option",22),e.SDv(30,23),e.qZA(),e.TgZ(31,"option",24),e.SDv(32,25),e.qZA()(),e.YNc(33,Vn,2,0,"span",15),e.qZA()(),e.YNc(34,_o,10,1,"fieldset",26),e.qZA(),e.TgZ(35,"div",27)(36,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return n.onSubmit()}),e.ALo(37,"titlecase"),e.ALo(38,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",n.bsModalRef),e.xp6(4),e.pQV(e.lcZ(3,15,n.action))(e.lcZ(4,17,n.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",n.formGroup),e.xp6(7),e.Q6J("readonly",!0),e.xp6(2),e.Q6J("ngClass",e.VKq(23,to,!n.editing)),e.xp6(3),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("subuid",i,"required")),e.xp6(1),e.Q6J("ngIf",n.formGroup.showError("subuid",i,"subuserIdExists")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(25,no)),e.xp6(5),e.Q6J("ngIf",n.formGroup.showError("perm",i,"required")),e.xp6(1),e.Q6J("ngIf",!n.editing),e.xp6(2),e.Q6J("form",n.formGroup)("submitText",e.lcZ(37,19,n.action)+" "+e.lcZ(38,21,n.resource))}},directives:[ne.z,a._Y,a.JL,Y.V,a.sg,H.P,q.o,a.Fj,X.b,a.JJ,a.u,M.mk,ie.U,M.O5,a.EJ,a.YN,a.Kr,M.sg,a.Wl,Ie.C,ve.s,J.p],pipes:[M.rS,V.m],styles:[""]}),t})();var io=l(13472);let a_=(()=>{class t{constructor(_,n){this.activeModal=_,this.actionLabels=n,this.resource="Swift Key",this.action=this.actionLabels.SHOW}setValues(_,n){this.user=_,this.secret_key=n}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(O.Kz),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-swift-key-modal"]],decls:23,vars:11,consts:function(){let o,_,n;return o="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",n="Secret key",[[3,"modalRef"],[1,"modal-title"],o,[1,"modal-content"],[1,"modal-body"],["novalidate",""],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","user","name","user","type","text",1,"form-control",3,"readonly","ngModel","ngModelChange"],["for","secret_key",1,"cd-col-form-label"],n,[1,"input-group"],["id","secret_key","name","secret_key","type","password",1,"form-control",3,"ngModel","readonly","ngModelChange"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],[1,"modal-footer"],[3,"backAction"]]},template:function(_,n){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"div",4)(7,"form",5)(8,"div",6)(9,"label",7),e.SDv(10,8),e.qZA(),e.TgZ(11,"div",9)(12,"input",10),e.NdJ("ngModelChange",function(s){return n.user=s}),e.qZA()()(),e.TgZ(13,"div",6)(14,"label",11),e.SDv(15,12),e.qZA(),e.TgZ(16,"div",9)(17,"div",13)(18,"input",14),e.NdJ("ngModelChange",function(s){return 
n.secret_key=s}),e.qZA(),e._UZ(19,"button",15)(20,"cd-copy-2-clipboard-button",16),e.qZA()()()()(),e.TgZ(21,"div",17)(22,"cd-back-button",18),e.NdJ("backAction",function(){return n.activeModal.close()}),e.qZA()(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",n.activeModal),e.xp6(4),e.pQV(e.lcZ(3,7,n.action))(e.lcZ(4,9,n.resource)),e.QtT(2),e.xp6(8),e.Q6J("readonly",!0)("ngModel",n.user),e.xp6(6),e.Q6J("ngModel",n.secret_key)("readonly",!0))},directives:[ne.z,a._Y,a.JL,a.F,H.P,q.o,a.Fj,X.b,a.JJ,a.On,Ie.C,ve.s,io.W],pipes:[M.rS,V.m],styles:[""]}),t})();var so=l(17932);function ao(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,51),e.qZA())}function ro(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,52),e.qZA())}function lo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,53),e.qZA())}function co(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,57),e.qZA())}function uo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,58),e.qZA())}function Ro(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",54),e.SDv(2,55),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",56),e.YNc(5,co,2,0,"span",13),e.YNc(6,uo,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(4),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("tenant",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("tenant",_,"notUnique"))}}function Eo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,59),e.qZA())}function go(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,60),e.qZA())}function fo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,61),e.qZA())}function So(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,62),e.qZA())}function To(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,65),e.qZA())}function Co(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,66),e.qZA())}function Mo(t,o){if(1&t&&(e.TgZ(0,"div",8),e._UZ(1,"label",63),e.TgZ(2,"div",11),e._UZ(3,"input",64),e.YNc(4,To,2,0,"span",13),e.YNc(5,Co,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(4),e.Q6J("ngIf",n.userForm.showError("max_buckets",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("max_buckets",_,"min"))}}function po(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,77),e.qZA())}function mo(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",71),e.SDv(2,72),e.qZA(),e.TgZ(3,"div",11)(4,"div",73),e._UZ(5,"input",74)(6,"button",75)(7,"cd-copy-2-clipboard-button",76),e.qZA(),e.YNc(8,po,2,0,"span",13),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(8),e.Q6J("ngIf",n.userForm.showError("access_key",_,"required"))}}function Ao(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,83),e.qZA())}function Go(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",78),e.SDv(2,79),e.qZA(),e.TgZ(3,"div",11)(4,"div",73),e._UZ(5,"input",80)(6,"button",81)(7,"cd-copy-2-clipboard-button",82),e.qZA(),e.YNc(8,Ao,2,0,"span",13),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),n=e.oxw();e.xp6(8),e.Q6J("ngIf",n.userForm.showError("secret_key",_,"required"))}}function Po(t,o){if(1&t&&(e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,67),e.qZA(),e.TgZ(3,"div",8)(4,"div",14)(5,"div",15),e._UZ(6,"input",68),e.TgZ(7,"label",69),e.SDv(8,70),e.qZA()()()(),e.YNc(9,mo,9,1,"div",19),e.YNc(10,Go,9,1,"div",19),e.qZA()),2&t){const _=e.oxw(2);e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key"))}}function Oo(t,o){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,96),e.qZA()())}const Z=function(t){return[t]};function bo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"span",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"span",97),e._UZ(6,"i"),e.qZA(),e._UZ(7,"input",98),e.TgZ(8,"button",99),e.NdJ("click",function(){const 
s=e.CHM(_).index;return e.oxw(3).showSubuserModal(s)}),e._UZ(9,"i",91),e.qZA(),e.TgZ(10,"button",100),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteSubuser(s)}),e._UZ(11,"i",91),e.qZA()(),e._UZ(12,"span",95),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(3),e.Tol(n.icons.user),e.xp6(1),e.s9C("value",_.id),e.xp6(2),e.Tol(n.icons.share),e.xp6(1),e.s9C("value","full-control"===_.permissions?"full":_.permissions),e.xp6(2),e.Q6J("ngClass",e.VKq(10,Z,n.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(12,Z,n.icons.destroy))}}function No(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,84),e.qZA(),e.TgZ(3,"div",85)(4,"div",14),e.YNc(5,Oo,3,0,"span",86),e.YNc(6,bo,13,14,"span",87),e.TgZ(7,"div",88)(8,"div",89)(9,"button",90),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showSubuserModal()}),e._UZ(10,"i",91),e.ynx(11),e.SDv(12,92),e.ALo(13,"titlecase"),e.ALo(14,"upperFirst"),e.BQk(),e.qZA()()(),e._UZ(15,"span",93),e.qZA()()()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.subusers.length),e.xp6(1),e.Q6J("ngForOf",_.subusers),e.xp6(4),e.Q6J("ngClass",e.VKq(9,Z,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(13,5,_.actionLabels.CREATE))(e.lcZ(14,7,_.subuserLabel)),e.QtT(12)}}function Wo(t,o){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,106),e.qZA()())}function Uo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"div",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"button",107),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showS3KeyModal(s)}),e._UZ(6,"i",91),e.qZA(),e.TgZ(7,"button",108),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteS3Key(s)}),e._UZ(8,"i",91),e.qZA()(),e._UZ(9,"span",95),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(3),e.Tol(n.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(2),e.Q6J("ngClass",e.VKq(6,Z,n.icons.show)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,Z,n.icons.destroy))}}function $o(t,o){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,109),e.qZA()())}function Io(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"span",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"button",110),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showSwiftKeyModal(s)}),e._UZ(6,"i",91),e.qZA()(),e._UZ(7,"span",95),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(3),e.Tol(n.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(2),e.Q6J("ngClass",e.VKq(5,Z,n.icons.show))}}function vo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,101),e.qZA(),e.TgZ(3,"div",8)(4,"label",63),e.SDv(5,102),e.qZA(),e.TgZ(6,"div",11),e.YNc(7,Wo,3,0,"span",86),e.YNc(8,Uo,10,10,"span",87),e.TgZ(9,"div",88)(10,"div",89)(11,"button",103),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showS3KeyModal()}),e._UZ(12,"i",91),e.ynx(13),e.SDv(14,104),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA()()(),e._UZ(17,"span",93),e.qZA(),e._UZ(18,"hr"),e.qZA(),e.TgZ(19,"div",8)(20,"label",63),e.SDv(21,105),e.qZA(),e.TgZ(22,"div",11),e.YNc(23,$o,3,0,"span",86),e.YNc(24,Io,8,7,"span",87),e.qZA()()()}if(2&t){const _=e.oxw(2);e.xp6(7),e.Q6J("ngIf",0===_.s3Keys.length),e.xp6(1),e.Q6J("ngForOf",_.s3Keys),e.xp6(4),e.Q6J("ngClass",e.VKq(11,Z,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,7,_.actionLabels.CREATE))(e.lcZ(16,9,_.s3keyLabel)),e.QtT(14),e.xp6(7),e.Q6J("ngIf",0===_.swiftKeys.length),e.xp6(1),e.Q6J("ngForOf",_.swiftKeys)}}function Fo(t,o){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,114),e.qZA()())}function ho(t,o){if(1&t){const 
_=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"div",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"button",115),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).showCapabilityModal(s)}),e._UZ(6,"i",91),e.qZA(),e.TgZ(7,"button",116),e.NdJ("click",function(){const s=e.CHM(_).index;return e.oxw(3).deleteCapability(s)}),e._UZ(8,"i",91),e.qZA()(),e._UZ(9,"span",95),e.qZA()}if(2&t){const _=o.$implicit,n=e.oxw(3);e.xp6(3),e.Tol(n.icons.share),e.xp6(1),e.hYB("value","",_.type,":",_.perm,""),e.xp6(2),e.Q6J("ngClass",e.VKq(7,Z,n.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(9,Z,n.icons.destroy))}}function Lo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,111),e.qZA(),e.TgZ(3,"div",8)(4,"div",14),e.YNc(5,Fo,3,0,"span",86),e.YNc(6,ho,10,11,"span",87),e.TgZ(7,"div",88)(8,"div",89)(9,"button",112),e.NdJ("click",function(){return e.CHM(_),e.oxw(2).showCapabilityModal()}),e.ALo(10,"pipeFunction"),e.ALo(11,"pipeFunction"),e._UZ(12,"i",91),e.ynx(13),e.SDv(14,113),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA()()(),e._UZ(17,"span",93),e.qZA()()()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.capabilities.length),e.xp6(1),e.Q6J("ngForOf",_.capabilities),e.xp6(3),e.Q6J("disabled",e.xi3(10,7,_.capabilities,_.hasAllCapabilities))("disableTooltip",!e.xi3(11,10,_.capabilities,_.hasAllCapabilities)),e.xp6(3),e.Q6J("ngClass",e.VKq(17,Z,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,13,_.actionLabels.ADD))(e.lcZ(16,15,_.capabilityLabel)),e.QtT(14)}}function yo(t,o){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",117),e.TgZ(4,"label",118),e.SDv(5,119),e.qZA()()()())}function Do(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,123),e.qZA())}function xo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,124),e.qZA())}function Zo(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",120),e.SDv(2,121),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",122),e.YNc(5,Do,2,0,"span",13),e.YNc(6,xo,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("user_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_quota_max_size",_,"quotaMaxSize"))}}function wo(t,o){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",125),e.TgZ(4,"label",126),e.SDv(5,127),e.qZA()()()())}function ko(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,131),e.qZA())}function qo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,132),e.qZA())}function Bo(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",128),e.SDv(2,129),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",130),e.YNc(5,ko,2,0,"span",13),e.YNc(6,qo,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("user_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_quota_max_objects",_,"min"))}}function Ho(t,o){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",133),e.TgZ(4,"label",134),e.SDv(5,135),e.qZA()()()())}function Xo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,139),e.qZA())}function Ko(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,140),e.qZA())}function zo(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",136),e.SDv(2,137),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",138),e.YNc(5,Xo,2,0,"span",13),e.YNc(6,Ko,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_size",_,"quotaMaxSize"))}}function 
Qo(t,o){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",141),e.TgZ(4,"label",142),e.SDv(5,143),e.qZA()()()())}function Yo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,147),e.qZA())}function Jo(t,o){1&t&&(e.TgZ(0,"span",50),e.SDv(1,148),e.qZA())}function Vo(t,o){if(1&t&&(e.TgZ(0,"div",8)(1,"label",144),e.SDv(2,145),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",146),e.YNc(5,Yo,2,0,"span",13),e.YNc(6,Jo,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),n=e.oxw();e.xp6(5),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("bucket_quota_max_objects",_,"min"))}}const r_=function(t){return{required:t}};function jo(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7)(9,"div",8)(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,ao,2,0,"span",13),e.YNc(15,ro,2,0,"span",13),e.YNc(16,lo,2,0,"span",13),e.qZA()(),e.TgZ(17,"div",8)(18,"div",14)(19,"div",15)(20,"input",16),e.NdJ("click",function(){return e.CHM(_),e.oxw().updateFieldsWhenTenanted()}),e.qZA(),e.TgZ(21,"label",17),e.SDv(22,18),e.qZA()()()(),e.YNc(23,Ro,7,3,"div",19),e.TgZ(24,"div",8)(25,"label",20),e.SDv(26,21),e.qZA(),e.TgZ(27,"div",11),e._UZ(28,"input",22),e.YNc(29,Eo,2,0,"span",13),e.YNc(30,go,2,0,"span",13),e.qZA()(),e.TgZ(31,"div",8)(32,"label",23),e.SDv(33,24),e.qZA(),e.TgZ(34,"div",11),e._UZ(35,"input",25),e.YNc(36,fo,2,0,"span",13),e.YNc(37,So,2,0,"span",13),e.qZA()(),e.TgZ(38,"div",8)(39,"label",26),e.SDv(40,27),e.qZA(),e.TgZ(41,"div",11)(42,"select",28),e.NdJ("change",function(i){return e.CHM(_),e.oxw().onMaxBucketsModeChange(i.target.value)}),e.TgZ(43,"option",29),e.SDv(44,30),e.qZA(),e.TgZ(45,"option",31),e.SDv(46,32),e.qZA(),e.TgZ(47,"option",33),e.SDv(48,34),e.qZA()()()(),e.YNc(49,Mo,6,2,"div",19),e.TgZ(50,"div",8)(51,"div",14)(52,"div",15),e._UZ(53,"input",35),e.TgZ(54,"label",36),e.SDv(55,37),e.qZA(),e.TgZ(56,"cd-helper"),e.SDv(57,38),e.qZA()()()(),e.YNc(58,Po,11,2,"fieldset",39),e.YNc(59,No,16,11,"fieldset",39),e.YNc(60,vo,25,13,"fieldset",39),e.YNc(61,Lo,18,19,"fieldset",39),e.TgZ(62,"fieldset")(63,"legend"),e.SDv(64,40),e.qZA(),e.TgZ(65,"div",8)(66,"div",14)(67,"div",15),e._UZ(68,"input",41),e.TgZ(69,"label",42),e.SDv(70,43),e.qZA()()()(),e.YNc(71,yo,6,0,"div",19),e.YNc(72,Zo,7,2,"div",19),e.YNc(73,wo,6,0,"div",19),e.YNc(74,Bo,7,2,"div",19),e.qZA(),e.TgZ(75,"fieldset")(76,"legend"),e.SDv(77,44),e.qZA(),e.TgZ(78,"div",8)(79,"div",14)(80,"div",15),e._UZ(81,"input",45),e.TgZ(82,"label",46),e.SDv(83,47),e.qZA()()()(),e.YNc(84,Ho,6,0,"div",19),e.YNc(85,zo,7,2,"div",19),e.YNc(86,Qo,6,0,"div",19),e.YNc(87,Vo,7,2,"div",19),e.qZA()(),e.TgZ(88,"div",48)(89,"cd-form-button-panel",49),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().onSubmit()}),e.ALo(90,"titlecase"),e.ALo(91,"upperFirst"),e.qZA()()()()()}if(2&t){const 
_=e.MAs(2),n=e.oxw();e.xp6(1),e.Q6J("formGroup",n.userForm),e.xp6(6),e.pQV(e.lcZ(6,30,n.action))(e.lcZ(7,32,n.resource)),e.QtT(5),e.xp6(3),e.Q6J("ngClass",e.VKq(38,r_,!n.editing)),e.xp6(3),e.Q6J("readonly",n.editing),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_id",_,"required")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("user_id",_,"pattern")),e.xp6(1),e.Q6J("ngIf",!n.userForm.getValue("show_tenant")&&n.userForm.showError("user_id",_,"notUnique")),e.xp6(4),e.Q6J("readonly",!0),e.xp6(3),e.Q6J("ngIf",n.userForm.getValue("show_tenant")),e.xp6(2),e.Q6J("ngClass",e.VKq(40,r_,!n.editing)),e.xp6(4),e.Q6J("ngIf",n.userForm.showError("display_name",_,"pattern")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("display_name",_,"required")),e.xp6(6),e.Q6J("ngIf",n.userForm.showError("email",_,"email")),e.xp6(1),e.Q6J("ngIf",n.userForm.showError("email",_,"notUnique")),e.xp6(12),e.Q6J("ngIf",1==n.userForm.get("max_buckets_mode").value),e.xp6(9),e.Q6J("ngIf",!n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(1),e.Q6J("ngIf",n.editing),e.xp6(10),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value&&!n.userForm.getValue("user_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.user_quota_enabled.value&&!n.userForm.getValue("user_quota_max_objects_unlimited")),e.xp6(10),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value&&!n.userForm.getValue("bucket_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",n.userForm.controls.bucket_quota_enabled.value&&!n.userForm.getValue("bucket_quota_max_objects_unlimited")),e.xp6(2),e.Q6J("form",n.userForm)("submitText",e.lcZ(90,34,n.action)+" "+e.lcZ(91,36,n.resource))}}let l_=(()=>{class t extends Ke.E{constructor(_,n,i,s,r,d,g){super(),this.formBuilder=_,this.route=n,this.router=i,this.rgwUserService=s,this.modalService=r,this.notificationService=d,this.actionLabels=g,this.editing=!1,this.submitObservables=[],this.icons=k.P,this.subusers=[],this.s3Keys=[],this.swiftKeys=[],this.capabilities=[],this.showTenant=!1,this.previousTenant=null,this.resource="user",this.subuserLabel="subuser",this.s3keyLabel="S3 Key",this.capabilityLabel="capability",this.editing=this.router.url.startsWith(`/rgw/user/${A.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.createForm()}createForm(){this.userForm=this.formBuilder.group({user_id:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[p.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("tenant"))]],show_tenant:[this.editing],tenant:[null,[a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[p.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("user_id"),!0)]],display_name:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_ 
-]+$/)]],email:[null,[p.h.email],[p.h.unique(this.rgwUserService.emailExists,this.rgwUserService)]],max_buckets_mode:[1],max_buckets:[1e3,[p.h.requiredIf({max_buckets_mode:"1"}),p.h.number(!1)]],suspended:[!1],generate_key:[!0],access_key:[null,[p.h.requiredIf({generate_key:!1})]],secret_key:[null,[p.h.requiredIf({generate_key:!1})]],user_quota_enabled:[!1],user_quota_max_size_unlimited:[!0],user_quota_max_size:[null,[p.h.composeIf({user_quota_enabled:!0,user_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],user_quota_max_objects_unlimited:[!0],user_quota_max_objects:[null,[p.h.requiredIf({user_quota_enabled:!0,user_quota_max_objects_unlimited:!1})]],bucket_quota_enabled:[!1],bucket_quota_max_size_unlimited:[!0],bucket_quota_max_size:[null,[p.h.composeIf({bucket_quota_enabled:!0,bucket_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],bucket_quota_max_objects_unlimited:[!0],bucket_quota_max_objects:[null,[p.h.requiredIf({bucket_quota_enabled:!0,bucket_quota_max_objects_unlimited:!1})]]})}ngOnInit(){this.route.params.subscribe(_=>{if(!_.hasOwnProperty("uid"))return void this.loadingReady();const n=decodeURIComponent(_.uid),i=[];i.push(this.rgwUserService.get(n)),i.push(this.rgwUserService.getQuota(n)),(0,_e.D)(i).subscribe(s=>{const r=R().clone(this.userForm.value);let d=R().pick(s[0],R().keys(this.userForm.value));switch(d.max_buckets){case-1:d.max_buckets_mode=-1,d.max_buckets="";break;case 0:d.max_buckets_mode=0,d.max_buckets="";break;default:d.max_buckets_mode=1}["user","bucket"].forEach(E=>{const S=s[1][E+"_quota"];d[E+"_quota_enabled"]=S.enabled,S.max_size<0?(d[E+"_quota_max_size_unlimited"]=!0,d[E+"_quota_max_size"]=null):(d[E+"_quota_max_size_unlimited"]=!1,d[E+"_quota_max_size"]=`${S.max_size} B`),S.max_objects<0?(d[E+"_quota_max_objects_unlimited"]=!0,d[E+"_quota_max_objects"]=null):(d[E+"_quota_max_objects_unlimited"]=!1,d[E+"_quota_max_objects"]=S.max_objects)}),d=R().merge(r,d),this.userForm.setValue(d),this.subusers=s[0].subusers,this.s3Keys=s[0].keys,this.swiftKeys=s[0].swift_keys;const g={"read, write":"*"};s[0].caps.forEach(E=>{E.perm in g&&(E.perm=g[E.perm])}),this.capabilities=s[0].caps,this.loadingReady()},()=>{this.loadingError()})})}goToListView(){this.router.navigate(["/rgw/user"])}onSubmit(){let _;if(this.userForm.pristine)return void this.goToListView();const n=this.getUID();if(this.editing){if(this._isGeneralDirty()){const i=this._getUpdateArgs();this.submitObservables.push(this.rgwUserService.update(n,i))}_="Updated Object Gateway user '" + n + "'"}else{const i=this._getCreateArgs();this.submitObservables.push(this.rgwUserService.create(i)),_="Created Object Gateway user '" + n + "'"}if(this._isUserQuotaDirty()){const i=this._getUserQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(n,i))}if(this._isBucketQuotaDirty()){const i=this._getBucketQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(n,i))}(0,bn.z)(...this.submitObservables).subscribe({error:()=>{this.userForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.notificationService.show(te.k.success,_),this.goToListView()}})}updateFieldsWhenTenanted(){this.showTenant=this.userForm.getValue("show_tenant"),this.showTenant?(this.userForm.get("user_id").markAsTouched(),this.previousTenant=this.userForm.get("tenant").value,this.userForm.get("tenant").patchValue(null)):(this.userForm.get("user_id").markAsUntouched(),this.userForm.get("tenant").patchValue(this.previousTenant))}getUID(){var _;let 
n=this.userForm.getValue("user_id");const i=null===(_=this.userForm)||void 0===_?void 0:_.getValue("tenant");return i&&i.length>0&&(n=`${this.userForm.getValue("tenant")}$${n}`),n}quotaMaxSizeValidator(_){return(0,p.P)(_.value)?null:null===RegExp("^(\\d+(\\.\\d+)?)\\s*(B|K(B|iB)?|M(B|iB)?|G(B|iB)?|T(B|iB)?)?$","i").exec(_.value)||(new xe.H).toBytes(_.value)<1024?{quotaMaxSize:!0}:null}setSubuser(_,n){const i={"full-control":"full","read-write":"readwrite"},s=this.getUID();this.submitObservables.push(this.rgwUserService.createSubuser(s,{subuser:_.id,access:_.permissions in i?i[_.permissions]:_.permissions,key_type:"swift",secret_key:_.secret_key,generate_secret:_.generate_secret?"true":"false"})),R().isNumber(n)?this.subusers[n]=_:(this.subusers.push(_),this.swiftKeys.push({user:_.id,secret_key:_.generate_secret?"Apply your changes first...":_.secret_key})),this.userForm.markAsDirty()}deleteSubuser(_){const n=this.subusers[_];this.submitObservables.push(this.rgwUserService.deleteSubuser(this.getUID(),n.id)),this.s3Keys=this.s3Keys.filter(i=>i.user!==n.id),this.swiftKeys=this.swiftKeys.filter(i=>i.user!==n.id),this.subusers.splice(_,1),this.userForm.markAsDirty()}setCapability(_,n){const i=this.getUID();if(R().isNumber(n)){const s=this.capabilities[n];this.submitObservables.push(this.rgwUserService.deleteCapability(i,s.type,s.perm)),this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities[n]=_}else this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities=[...this.capabilities,_];this.userForm.markAsDirty()}deleteCapability(_){const n=this.capabilities[_];this.submitObservables.push(this.rgwUserService.deleteCapability(this.getUID(),n.type,n.perm)),this.capabilities.splice(_,1),this.capabilities=[...this.capabilities],this.userForm.markAsDirty()}hasAllCapabilities(_){return!R().difference(i_.getAll(),R().map(_,"type")).length}setS3Key(_,n){if(!R().isNumber(n)){const i=_.user.match(/([^:]+)(:(.+))?/),s=i[1],r={subuser:i[2]?i[3]:"",generate_key:_.generate_key?"true":"false"};"false"===r.generate_key&&(R().isNil(_.access_key)||(r.access_key=_.access_key),R().isNil(_.secret_key)||(r.secret_key=_.secret_key)),this.submitObservables.push(this.rgwUserService.addS3Key(s,r)),this.s3Keys.push({user:_.user,access_key:_.generate_key?"Apply your changes first...":_.access_key,secret_key:_.generate_key?"Apply your changes first...":_.secret_key})}this.userForm.markAsDirty()}deleteS3Key(_){const n=this.s3Keys[_];this.submitObservables.push(this.rgwUserService.deleteS3Key(this.getUID(),n.access_key)),this.s3Keys.splice(_,1),this.userForm.markAsDirty()}showSubuserModal(_){const n=this.getUID(),i=this.modalService.show(oo);if(R().isNumber(_)){const s=this.subusers[_];i.componentInstance.setEditing(),i.componentInstance.setValues(n,s.id,s.permissions)}else i.componentInstance.setEditing(!1),i.componentInstance.setValues(n),i.componentInstance.setSubusers(this.subusers);i.componentInstance.submitAction.subscribe(s=>{this.setSubuser(s,_)})}showS3KeyModal(_){const n=this.modalService.show(s_);if(R().isNumber(_)){const i=this.s3Keys[_];n.componentInstance.setViewing(),n.componentInstance.setValues(i.user,i.access_key,i.secret_key)}else{const i=this._getS3KeyUserCandidates();n.componentInstance.setViewing(!1),n.componentInstance.setUserCandidates(i),n.componentInstance.submitAction.subscribe(s=>{this.setS3Key(s)})}}showSwiftKeyModal(_){const 
n=this.modalService.show(a_),i=this.swiftKeys[_];n.componentInstance.setValues(i.user,i.secret_key)}showCapabilityModal(_){const n=this.modalService.show(yn);if(R().isNumber(_)){const i=this.capabilities[_];n.componentInstance.setEditing(),n.componentInstance.setValues(i.type,i.perm)}else n.componentInstance.setEditing(!1),n.componentInstance.setCapabilities(this.capabilities);n.componentInstance.submitAction.subscribe(i=>{this.setCapability(i,_)})}_isGeneralDirty(){return["display_name","email","max_buckets_mode","max_buckets","suspended"].some(_=>this.userForm.get(_).dirty)}_isUserQuotaDirty(){return["user_quota_enabled","user_quota_max_size_unlimited","user_quota_max_size","user_quota_max_objects_unlimited","user_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_isBucketQuotaDirty(){return["bucket_quota_enabled","bucket_quota_max_size_unlimited","bucket_quota_max_size","bucket_quota_max_objects_unlimited","bucket_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_getCreateArgs(){const _={uid:this.getUID(),display_name:this.userForm.getValue("display_name"),suspended:this.userForm.getValue("suspended"),email:"",max_buckets:this.userForm.getValue("max_buckets"),generate_key:this.userForm.getValue("generate_key"),access_key:"",secret_key:""},n=this.userForm.getValue("email");R().isString(n)&&n.length>0&&R().merge(_,{email:n}),this.userForm.getValue("generate_key")||R().merge(_,{generate_key:!1,access_key:this.userForm.getValue("access_key"),secret_key:this.userForm.getValue("secret_key")});const s=parseInt(this.userForm.getValue("max_buckets_mode"),10);return R().includes([-1,0],s)&&R().merge(_,{max_buckets:s}),_}_getUpdateArgs(){const _={},n=["display_name","email","max_buckets","suspended"];for(const s of n)_[s]=this.userForm.getValue(s);const i=parseInt(this.userForm.getValue("max_buckets_mode"),10);return R().includes([-1,0],i)&&(_.max_buckets=i),_}_getUserQuotaArgs(){const _={quota_type:"user",enabled:this.userForm.getValue("user_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("user_quota_max_size_unlimited")){const n=(new xe.H).toBytes(this.userForm.getValue("user_quota_max_size"));_.max_size_kb=(n/1024).toFixed(0)}return this.userForm.getValue("user_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("user_quota_max_objects")),_}_getBucketQuotaArgs(){const _={quota_type:"bucket",enabled:this.userForm.getValue("bucket_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("bucket_quota_max_size_unlimited")){const n=(new xe.H).toBytes(this.userForm.getValue("bucket_quota_max_size"));_.max_size_kb=(n/1024).toFixed(0)}return this.userForm.getValue("bucket_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("bucket_quota_max_objects")),_}_getS3KeyUserCandidates(){let _=[];const n=this.getUID();return R().isString(n)&&!R().isEmpty(n)&&_.push(n),this.subusers.forEach(i=>{_.push(i.id)}),this.s3Keys.forEach(i=>{_.push(i.user)}),_=R().uniq(_),_}onMaxBucketsModeChange(_){"1"===_&&(this.userForm.get("max_buckets").valid||this.userForm.patchValue({max_buckets:1e3}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Q.O),e.Y36(w.gz),e.Y36(w.F0),e.Y36(D),e.Y36(oe.Z),e.Y36(he.g),e.Y36(A.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let o,_,n,i,s,r,d,g,E,S,G,P,b,N,m,W,U,$,I,v,F,h,L,y,f,x,T,B,C,se,ae,re,le,ce,de,ue,Re,Ee,ge,fe,Se,Te,Ce,Me,pe,me,Ae,Ge,Pe,Oe,u,R_,E_,g_,f_,S_,T_,C_,M_,p_,m_,A_,G_,P_,O_,b_,N_,W_,U_;return o="" + 
"\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="User ID",n="Show Tenant",i="Full name",s="Email address",r="Max. buckets",d="Disabled",g="Unlimited",E="Custom",S="Suspended",G="Suspending the user disables the user and subuser.",P="User quota",b="Enabled",N="Bucket quota",m="Enabled",W="This field is required.",U="The value is not valid.",$="The chosen user ID is already in use.",I="Tenant",v="The value is not valid.",F="The chosen user ID exists in this tenant.",h="The value is not valid.",L="This field is required.",y="This is not a valid email address.",f="The chosen email address is already in use.",x="This field is required.",T="The entered value must be >= 1.",B="S3 key",C="Auto-generate key",se="Access key",ae="This field is required.",re="Secret key",le="This field is required.",ce="Subusers",de="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",ue="There are no subusers.",Re="Edit",Ee="Delete",ge="Keys",fe="S3",Se="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",Te="Swift",Ce="There are no keys.",Me="Show",pe="Delete",me="There are no keys.",Ae="Show",Ge="Capabilities",Pe="All capabilities are already added.",Oe="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",u="There are no capabilities.",R_="Edit",E_="Delete",g_="Unlimited size",f_="Max. size",S_="This field is required.",T_="The value is not valid.",C_="Unlimited objects",M_="Max. objects",p_="This field is required.",m_="The entered value must be >= 0.",A_="Unlimited size",G_="Max. size",P_="This field is required.",O_="The value is not valid.",b_="Unlimited objects",N_="Max. objects",W_="This field is required.",U_="The entered value must be >= 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],o,[1,"card-body"],[1,"form-group","row"],["for","user_id",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user_id","type","text","formControlName","user_id",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","show_tenant","type","checkbox","formControlName","show_tenant",1,"custom-control-input",3,"readonly","click"],["for","show_tenant",1,"custom-control-label"],n,["class","form-group 
row",4,"ngIf"],["for","display_name",1,"cd-col-form-label",3,"ngClass"],i,["id","display_name","type","text","formControlName","display_name",1,"form-control"],["for","email",1,"cd-col-form-label"],s,["id","email","type","text","formControlName","email",1,"form-control"],["for","max_buckets_mode",1,"cd-col-form-label"],r,["formControlName","max_buckets_mode","name","max_buckets_mode","id","max_buckets_mode",1,"form-select",3,"change"],["value","-1"],d,["value","0"],g,["value","1"],E,["id","suspended","type","checkbox","formControlName","suspended",1,"custom-control-input"],["for","suspended",1,"custom-control-label"],S,G,[4,"ngIf"],P,["id","user_quota_enabled","type","checkbox","formControlName","user_quota_enabled",1,"custom-control-input"],["for","user_quota_enabled",1,"custom-control-label"],b,N,["id","bucket_quota_enabled","type","checkbox","formControlName","bucket_quota_enabled",1,"custom-control-input"],["for","bucket_quota_enabled",1,"custom-control-label"],m,[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],W,U,$,["for","tenant",1,"cd-col-form-label"],I,["id","tenant","type","text","formControlName","tenant","autofocus","",1,"form-control",3,"readonly"],v,F,h,L,y,f,[1,"cd-col-form-label"],["id","max_buckets","type","number","formControlName","max_buckets","min","1",1,"form-control"],x,T,B,["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],C,["for","access_key",1,"cd-col-form-label","required"],se,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],ae,["for","secret_key",1,"cd-col-form-label","required"],re,["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],le,ce,[1,"row"],["class","no-border",4,"ngIf"],[4,"ngFor","ngForOf"],[1,"row","my-2"],[1,"col-12"],["type","button",1,"btn","btn-light","float-end","tc_addSubuserButton",3,"click"],[3,"ngClass"],de,[1,"help-block"],[1,"no-border"],[1,"form-text","text-muted"],ue,[1,"input-group-text"],["type","text","readonly","",1,"cd-form-control",3,"value"],["type","button","ngbTooltip",Re,1,"btn","btn-light","tc_showSubuserButton",3,"click"],["type","button","ngbTooltip",Ee,1,"btn","btn-light","tc_deleteSubuserButton",3,"click"],ge,fe,["type","button",1,"btn","btn-light","float-end","tc_addS3KeyButton",3,"click"],Se,Te,Ce,["type","button","ngbTooltip",Me,1,"btn","btn-light","tc_showS3KeyButton",3,"click"],["type","button","ngbTooltip",pe,1,"btn","btn-light","tc_deleteS3KeyButton",3,"click"],me,["type","button","ngbTooltip",Ae,1,"btn","btn-light","tc_showSwiftKeyButton",3,"click"],Ge,["type","button","ngbTooltip",Pe,"triggers","pointerenter:pointerleave",1,"btn","btn-light","float-end","tc_addCapButton",3,"disabled","disableTooltip","click"],Oe,u,["type","button","ngbTooltip",R_,1,"btn","btn-light","tc_editCapButton",3,"click"],["type","button","ngbTooltip",E_,1,"btn","btn-light","tc_deleteCapButton",3,"click"],["id","user_quota_max_size_unlimited","type","checkbox","formControlName","user_quota_max_size_unlimited",1,"custom-control-input"],["for","user_quota_max_size_unlimited",1,"custom-control-label"],g_,["for","user_quota_max_size",1,"cd-col-form-label","required"],f_,["id","user_quota_max_size","type","text","for
mControlName","user_quota_max_size","cdDimlessBinary","",1,"form-control"],S_,T_,["id","user_quota_max_objects_unlimited","type","checkbox","formControlName","user_quota_max_objects_unlimited",1,"custom-control-input"],["for","user_quota_max_objects_unlimited",1,"custom-control-label"],C_,["for","user_quota_max_objects",1,"cd-col-form-label","required"],M_,["id","user_quota_max_objects","type","number","formControlName","user_quota_max_objects","min","0",1,"form-control"],p_,m_,["id","bucket_quota_max_size_unlimited","type","checkbox","formControlName","bucket_quota_max_size_unlimited",1,"custom-control-input"],["for","bucket_quota_max_size_unlimited",1,"custom-control-label"],A_,["for","bucket_quota_max_size",1,"cd-col-form-label","required"],G_,["id","bucket_quota_max_size","type","text","formControlName","bucket_quota_max_size","cdDimlessBinary","",1,"form-control"],P_,O_,["id","bucket_quota_max_objects_unlimited","type","checkbox","formControlName","bucket_quota_max_objects_unlimited",1,"custom-control-input"],["for","bucket_quota_max_objects_unlimited",1,"custom-control-label"],b_,["for","bucket_quota_max_objects",1,"cd-col-form-label","required"],N_,["id","bucket_quota_max_objects","type","number","formControlName","bucket_quota_max_objects","min","0",1,"form-control"],W_,U_]},template:function(_,n){1&_&&e.YNc(0,jo,92,42,"div",0),2&_&&e.Q6J("cdFormLoading",n.loading)},directives:[ze.y,a._Y,a.JL,Y.V,a.sg,H.P,M.mk,q.o,a.Fj,X.b,a.JJ,a.u,M.O5,a.Wl,ie.U,a.EJ,a.YN,a.Kr,a.wV,a.qQ,Le.S,Ie.C,ve.s,M.sg,O._L,so.Q,J.p],pipes:[M.rS,V.m,ke.i],styles:[""]}),t})();var c_=l(99466);const d_=function(){return{exact:!0}};let ei=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-tabs"]],decls:7,vars:4,consts:function(){let o,_;return o="Users",_="Roles",[[1,"nav","nav-tabs"],[1,"nav-item"],["routerLink","/rgw/user","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],o,["routerLink","/rgw/roles","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],_]},template:function(_,n){1&_&&(e.TgZ(0,"ul",0)(1,"li",1)(2,"a",2),e.SDv(3,3),e.qZA()(),e.TgZ(4,"li",1)(5,"a",4),e.SDv(6,5),e.qZA()()()),2&_&&(e.xp6(2),e.Q6J("routerLinkActiveOptions",e.DdM(2,d_)),e.xp6(3),e.Q6J("routerLinkActiveOptions",e.DdM(3,d_)))},directives:[w.yS,w.Od],styles:[""]}),t})();var _i=l(78877),ti=l(86969);const ni=["accessKeyTpl"],oi=["secretKeyTpl"],ii=function(t){return[t]};function si(t,o){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"legend"),e.SDv(2,13),e.qZA(),e.TgZ(3,"div")(4,"cd-table",14),e.NdJ("updateSelection",function(i){return e.CHM(_),e.oxw(3).updateKeysSelection(i)}),e.TgZ(5,"div",15)(6,"div",16)(7,"button",17),e.NdJ("click",function(){return e.CHM(_),e.oxw(3).showKeyModal()}),e._UZ(8,"i",18),e.ynx(9),e.SDv(10,19),e.BQk(),e.qZA()()()()()()}if(2&t){const _=e.oxw(3);e.xp6(4),e.Q6J("data",_.keys)("columns",_.keysColumns),e.xp6(3),e.Q6J("disabled",!_.keysSelection.hasSingleSelection),e.xp6(1),e.Q6J("ngClass",e.VKq(4,ii,_.icons.show))}}function ai(t,o){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,20),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Oqu(_.user.email)}}function ri(t,o){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.xp6(1),e.AsE(" ",_.id," (",_.permissions,") ")}}function li(t,o){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,21),e.qZA(),e.TgZ(3,"td"),e.YNc(4,ri,2,2,"div",22),e.qZA()()),2&t){const 
_=e.oxw(3);e.xp6(4),e.Q6J("ngForOf",_.user.subusers)}}function ci(t,o){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=o.$implicit;e.xp6(1),e.AsE(" ",_.type," (",_.perm,") ")}}function di(t,o){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,23),e.qZA(),e.TgZ(3,"td"),e.YNc(4,ci,2,2,"div",22),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Q6J("ngForOf",_.user.caps)}}function ui(t,o){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,24),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.ALo(5,"join"),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Oqu(e.lcZ(5,1,_.user.mfa_ids))}}function Ri(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Ei(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,29),e.qZA())}function gi(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.user_quota.max_size)," ")}}function fi(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Si(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,30),e.qZA())}function Ti(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",_.user.user_quota.max_objects," ")}}function Ci(t,o){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,25),e.qZA(),e.TgZ(3,"table",2)(4,"tbody")(5,"tr")(6,"td",3),e.SDv(7,26),e.qZA(),e.TgZ(8,"td",5),e._uU(9),e.ALo(10,"booleanText"),e.qZA()(),e.TgZ(11,"tr")(12,"td",8),e.SDv(13,27),e.qZA(),e.YNc(14,Ri,2,0,"td",0),e.YNc(15,Ei,2,0,"td",0),e.YNc(16,gi,3,3,"td",0),e.qZA(),e.TgZ(17,"tr")(18,"td",8),e.SDv(19,28),e.qZA(),e.YNc(20,fi,2,0,"td",0),e.YNc(21,Si,2,0,"td",0),e.YNc(22,Ti,2,1,"td",0),e.qZA()()()()),2&t){const _=e.oxw(3);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.user_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects>-1)}}function Mi(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function pi(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,35),e.qZA())}function mi(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.bucket_quota.max_size)," ")}}function Ai(t,o){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Gi(t,o){1&t&&(e.TgZ(0,"td"),e.SDv(1,36),e.qZA())}function Pi(t,o){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",_.user.bucket_quota.max_objects," ")}}function Oi(t,o){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,31),e.qZA(),e.TgZ(3,"table",2)(4,"tbody")(5,"tr")(6,"td",3),e.SDv(7,32),e.qZA(),e.TgZ(8,"td",5),e._uU(9),e.ALo(10,"booleanText"),e.qZA()(),e.TgZ(11,"tr")(12,"td",8),e.SDv(13,33),e.qZA(),e.YNc(14,Mi,2,0,"td",0),e.YNc(15,pi,2,0,"td",0),e.YNc(16,mi,3,3,"td",0),e.qZA(),e.TgZ(17,"tr")(18,"td",8),e.SDv(19,34),e.qZA(),e.YNc(20,Ai,2,0,"td",0),e.YNc(21,Gi,2,0,"td",0),e.YNc(22,Pi,2,1,"td",0),e.qZA()()()()),2&t){const 
_=e.oxw(3);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects>-1)}}function bi(t,o){if(1&t&&(e.TgZ(0,"div"),e.YNc(1,si,11,6,"div",0),e.TgZ(2,"legend"),e.SDv(3,1),e.qZA(),e.TgZ(4,"table",2)(5,"tbody")(6,"tr")(7,"td",3),e.SDv(8,4),e.qZA(),e.TgZ(9,"td",5),e._uU(10),e.qZA()(),e.TgZ(11,"tr")(12,"td",3),e.SDv(13,6),e.qZA(),e.TgZ(14,"td",5),e._uU(15),e.qZA()(),e.TgZ(16,"tr")(17,"td",3),e.SDv(18,7),e.qZA(),e.TgZ(19,"td",5),e._uU(20),e.qZA()(),e.TgZ(21,"tr")(22,"td",8),e.SDv(23,9),e.qZA(),e.TgZ(24,"td"),e._uU(25),e.qZA()(),e.YNc(26,ai,5,1,"tr",0),e.TgZ(27,"tr")(28,"td",8),e.SDv(29,10),e.qZA(),e.TgZ(30,"td"),e._uU(31),e.ALo(32,"booleanText"),e.qZA()(),e.TgZ(33,"tr")(34,"td",8),e.SDv(35,11),e.qZA(),e.TgZ(36,"td"),e._uU(37),e.ALo(38,"booleanText"),e.qZA()(),e.TgZ(39,"tr")(40,"td",8),e.SDv(41,12),e.qZA(),e.TgZ(42,"td"),e._uU(43),e.ALo(44,"map"),e.qZA()(),e.YNc(45,li,5,1,"tr",0),e.YNc(46,di,5,1,"tr",0),e.YNc(47,ui,6,3,"tr",0),e.qZA()(),e.YNc(48,Ci,23,9,"div",0),e.YNc(49,Oi,23,9,"div",0),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("ngIf",_.keys.length),e.xp6(9),e.Oqu(_.user.tenant),e.xp6(5),e.Oqu(_.user.user_id),e.xp6(5),e.Oqu(_.user.uid),e.xp6(5),e.Oqu(_.user.display_name),e.xp6(1),e.Q6J("ngIf",null==_.user.email?null:_.user.email.length),e.xp6(5),e.Oqu(e.lcZ(32,14,_.user.suspended)),e.xp6(6),e.Oqu(e.lcZ(38,16,"true"===_.user.system)),e.xp6(6),e.Oqu(e.xi3(44,18,_.user.max_buckets,_.maxBucketsMap)),e.xp6(2),e.Q6J("ngIf",_.user.subusers&&_.user.subusers.length),e.xp6(1),e.Q6J("ngIf",_.user.caps&&_.user.caps.length),e.xp6(1),e.Q6J("ngIf",null==_.user.mfa_ids?null:_.user.mfa_ids.length),e.xp6(1),e.Q6J("ngIf",_.user.user_quota),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota)}}function Ni(t,o){if(1&t&&(e.ynx(0),e.YNc(1,bi,50,21,"div",0),e.BQk()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",_.user)}}let Wi=(()=>{class t{constructor(_,n){this.rgwUserService=_,this.modalService=n,this.keys=[],this.keysColumns=[],this.keysSelection=new De.r,this.icons=k.P}ngOnInit(){this.keysColumns=[{name:"Username",prop:"username",flexGrow:1},{name:"Type",prop:"type",flexGrow:1}],this.maxBucketsMap={"-1":"Disabled",0:"Unlimited"}}ngOnChanges(){this.selection&&(this.user=this.selection,this.user.subusers=R().sortBy(this.user.subusers,"id"),this.user.caps=R().sortBy(this.user.caps,"type"),this.rgwUserService.getQuota(this.user.uid).subscribe(_=>{R().extend(this.user,_)}),this.keys=[],this.user.keys&&this.user.keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"S3",username:_.user,ref:_})}),this.user.swift_keys&&this.user.swift_keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"Swift",username:_.user,ref:_})}),this.keys=R().sortBy(this.keys,"user"))}updateKeysSelection(_){this.keysSelection=_}showKeyModal(){const _=this.keysSelection.first(),n=this.modalService.show("S3"===_.type?s_:a_);switch(_.type){case"S3":n.componentInstance.setViewing(),n.componentInstance.setValues(_.ref.user,_.ref.access_key,_.ref.secret_key);break;case"Swift":n.componentInstance.setValues(_.ref.user,_.ref.secret_key)}}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(D),e.Y36(oe.Z))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-details"]],viewQuery:function(_,n){if(1&_&&(e.Gf(ni,5),e.Gf(oi,5)),2&_){let i;e.iGM(i=e.CRH())&&(n.accessKeyTpl=i.first),e.iGM(i=e.CRH())&&(n.secretKeyTpl=i.first)}},inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let o,_,n,i,s,r,d,g,E,S,G,P,b,N,m,W,U,$,I,v,F,h,L,y,f,x;return o="Details",_="Tenant",n="User ID",i="Username",s="Full name",r="Suspended",d="System",g="Maximum buckets",E="Keys",S="Show",G="Email address",P="Subusers",b="Capabilities",N="MFAs(Id)",m="User quota",W="Enabled",U="Maximum size",$="Maximum objects",I="Unlimited",v="Unlimited",F="Bucket quota",h="Enabled",L="Maximum size",y="Maximum objects",f="Unlimited",x="Unlimited",[[4,"ngIf"],o,[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],_,[1,"w-75"],n,i,[1,"bold"],s,r,d,g,E,["columnMode","flex","selectionType","multi","forceIdentifier","true",3,"data","columns","updateSelection"],[1,"table-actions"],["dropdown","",1,"btn-group"],["type","button",1,"btn","btn-accent",3,"disabled","click"],[3,"ngClass"],S,G,P,[4,"ngFor","ngForOf"],b,N,m,W,U,$,I,v,F,h,L,y,f,x]},template:function(_,n){1&_&&e.YNc(0,Ni,2,1,"ng-container",0),2&_&&e.Q6J("ngIf",n.selection)},directives:[M.O5,j.a,q.o,M.mk,M.sg],pipes:[t_.T,_i.b,ti.A,je.$],styles:[""]}),t})();const Ui=["userSizeTpl"],$i=["userObjectTpl"];function Ii(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_size)("used",_.stats.size_actual)}}function vi(t,o){1&t&&e.SDv(0,9)}function Fi(t,o){if(1&t&&(e.YNc(0,Ii,1,2,"cd-usage-bar",6),e.YNc(1,vi,1,0,"ng-template",null,7,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_size>0&&_.user_quota.enabled)("ngIfElse",n)}}function hi(t,o){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_objects)("used",_.stats.num_objects)("isBinary",!1)}}function Li(t,o){1&t&&e.SDv(0,13)}function yi(t,o){if(1&t&&(e.YNc(0,hi,1,3,"cd-usage-bar",10),e.YNc(1,Li,1,0,"ng-template",null,11,e.W1O)),2&t){const _=o.row,n=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_objects>0&&_.user_quota.enabled)("ngIfElse",n)}}let xi=(()=>{class t extends ye.o{constructor(_,n,i,s,r,d){super(d),this.authStorageService=_,this.rgwUserService=n,this.modalService=i,this.urlBuilder=s,this.actionLabels=r,this.ngZone=d,this.columns=[],this.users=[],this.selection=new De.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Username",prop:"uid",flexGrow:1},{name:"Tenant",prop:"tenant",flexGrow:1},{name:"Full name",prop:"display_name",flexGrow:1},{name:"Email address",prop:"email",flexGrow:1},{name:"Suspended",prop:"suspended",flexGrow:1,cellClass:"text-center",cellTransformation:c_.e.checkIcon},{name:"Max. 
buckets",prop:"max_buckets",flexGrow:1,cellTransformation:c_.e.map,customTemplateConfig:{"-1":"Disabled",0:"Unlimited"}},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.userSizeTpl,flexGrow:.8},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.userObjectTpl,flexGrow:.8}];const _=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().uid)}`;this.tableActions=[{permission:"create",icon:k.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:r=>!r.hasSelection},{permission:"update",icon:k.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:k.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:r=>r.hasMultiSelection}],this.setTableRefreshTimeout()}getUserList(_){this.setTableRefreshTimeout(),this.rgwUserService.list().subscribe(n=>{this.users=n},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(Ve.M,{itemDescription:this.selection.hasSingleSelection?"user":"users",itemNames:this.selection.selected.map(_=>_.uid),submitActionObservable:()=>new Je.y(_=>{(0,_e.D)(this.selection.selected.map(n=>this.rgwUserService.delete(n.uid))).subscribe({error:n=>{_.error(n),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ue.j),e.Y36(D),e.Y36(oe.Z),e.Y36(ee.F),e.Y36(A.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-list"]],viewQuery:function(_,n){if(1&_&&(e.Gf(j.a,7),e.Gf(Ui,7),e.Gf($i,7)),2&_){let i;e.iGM(i=e.CRH())&&(n.table=i.first),e.iGM(i=e.CRH())&&(n.userSizeTpl=i.first),e.iGM(i=e.CRH())&&(n.userObjectTpl=i.first)}},features:[e._Bn([{provide:ee.F,useValue:new ee.F("rgw/user")}]),e.qOj],decls:9,vars:9,consts:function(){let o,_;return o="No Limit",_="No Limit",[["columnMode","flex","selectionType","multiClick","identifier","uid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["userSizeTpl",""],["userObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],o,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,n){1&_&&(e._UZ(0,"cd-rgw-user-tabs"),e.TgZ(1,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return n.setExpandedRow(s)})("updateSelection",function(s){return n.updateSelection(s)})("fetchData",function(s){return n.getUserList(s)}),e._UZ(3,"cd-table-actions",2)(4,"cd-rgw-user-details",3),e.qZA(),e.YNc(5,Fi,3,2,"ng-template",null,4,e.W1O),e.YNc(7,yi,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.xp6(1),e.Q6J("autoReload",!1)("data",n.users)("columns",n.columns)("hasDetails",!0)("status",n.tableStatus),e.xp6(2),e.Q6J("permission",n.permission)("selection",n.selection)("tableActions",n.tableActions),e.xp6(1),e.Q6J("selection",n.expandedRow))},directives:[ei,j.a,__.K,Wi,M.O5,n_.O],styles:[""]}),t})();var Zi=l(83357);let u_=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[M.ez,I_.m,a.u5,a.UX,v_.B,O.Oz,w.Bz,O.HK,ke.b]]}),t})();const 
wi=[{path:""},{path:"daemon",component:On,data:{breadcrumbs:"Gateways"}},{path:"user",data:{breadcrumbs:"Users"},children:[{path:"",component:xi},{path:A.MQ.CREATE,component:l_,data:{breadcrumbs:A.Qn.CREATE}},{path:`${A.MQ.EDIT}/:uid`,component:l_,data:{breadcrumbs:A.Qn.EDIT}}]},{path:"roles",data:{breadcrumbs:"Roles",resource:"api.rgw.roles@1.0",tabs:[{name:"Users",url:"/rgw/user"},{name:"Roles",url:"/rgw/roles"}]},children:[{path:"",component:$_.c},{path:A.MQ.CREATE,component:Zi.U,data:{breadcrumbs:A.Qn.CREATE}}]},{path:"bucket",data:{breadcrumbs:"Buckets"},children:[{path:"",component:cn},{path:A.MQ.CREATE,component:Ye,data:{breadcrumbs:A.Qn.CREATE}},{path:`${A.MQ.EDIT}/:bid`,component:Ye,data:{breadcrumbs:A.Qn.EDIT}}]}];let ki=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[u_,w.Bz.forChild(wi)]]}),t})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/803.08339784f3bb5d16.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/803.08339784f3bb5d16.js new file mode 100644 index 000000000..067c61f3e --- /dev/null +++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/803.08339784f3bb5d16.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[803],{77803:(Dr,t_,c)=>{c.r(t_),c.d(t_,{RgwModule:()=>L_,RoutedRgwModule:()=>Zr});var T=c(88692),a=c(20092),J=c(54247),G=c(51389),o_=c(37496),I=c(79512),j_=c(4268),et=c(44466),_t=c(66265),tt=c(23815),E=c.n(tt),ne=c(35758),Ee=c(95152),n_=c(33394),xe=c(64762),i_=c(35732),ke=c(25917),s_=c(19773),ot=c(96736),a_=c(5304),oe=c(20523),nt=c(93523),e=c(64537);let Q=class{constructor(n,_){this.http=n,this.rgwDaemonService=_,this.url="api/rgw/user"}list(){return this.enumerate().pipe((0,s_.zg)(n=>n.length>0?(0,ne.D)(n.map(_=>this.get(_))):(0,ke.of)([])))}enumerate(){return this.rgwDaemonService.request(n=>this.http.get(this.url,{params:n}))}enumerateEmail(){return this.rgwDaemonService.request(n=>this.http.get(`${this.url}/get_emails`,{params:n}))}get(n){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${n}`,{params:_}))}getQuota(n){return this.rgwDaemonService.request(_=>this.http.get(`${this.url}/${n}/quota`,{params:_}))}create(n){return this.rgwDaemonService.request(_=>(E().keys(n).forEach(o=>{_=_.append(o,n[o])}),this.http.post(this.url,null,{params:_})))}update(n,_){return this.rgwDaemonService.request(o=>(E().keys(_).forEach(i=>{o=o.append(i,_[i])}),this.http.put(`${this.url}/${n}`,null,{params:o})))}updateQuota(n,_){return this.rgwDaemonService.request(o=>(E().keys(_).forEach(i=>{o=o.append(i,_[i])}),this.http.put(`${this.url}/${n}/quota`,null,{params:o})))}delete(n){return this.rgwDaemonService.request(_=>this.http.delete(`${this.url}/${n}`,{params:_}))}createSubuser(n,_){return this.rgwDaemonService.request(o=>(E().keys(_).forEach(i=>{o=o.append(i,_[i])}),this.http.post(`${this.url}/${n}/subuser`,null,{params:o})))}deleteSubuser(n,_){return this.rgwDaemonService.request(o=>this.http.delete(`${this.url}/${n}/subuser/${_}`,{params:o}))}addCapability(n,_,o){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",o),this.http.post(`${this.url}/${n}/capability`,null,{params:i})))}deleteCapability(n,_,o){return this.rgwDaemonService.request(i=>(i=(i=i.append("type",_)).append("perm",o),this.http.delete(`${this.url}/${n}/capability`,{params:i})))}addS3Key(n,_){return 
this.rgwDaemonService.request(o=>(o=o.append("key_type","s3"),E().keys(_).forEach(i=>{o=o.append(i,_[i])}),this.http.post(`${this.url}/${n}/key`,null,{params:o})))}deleteS3Key(n,_){return this.rgwDaemonService.request(o=>(o=(o=o.append("key_type","s3")).append("access_key",_),this.http.delete(`${this.url}/${n}/key`,{params:o})))}exists(n){return this.get(n).pipe((0,ot.h)(!0),(0,a_.K)(_=>(E().isFunction(_.preventDefault)&&_.preventDefault(),(0,ke.of)(!1))))}emailExists(n){return n=decodeURIComponent(n),this.enumerateEmail().pipe((0,s_.zg)(_=>{const o=E().indexOf(_,n);return(0,ke.of)(-1!==o)}))}};Q.\u0275fac=function(n){return new(n||Q)(e.LFG(i_.eN),e.LFG(oe.b))},Q.\u0275prov=e.Yz7({token:Q,factory:Q.\u0275fac,providedIn:"root"}),Q=(0,xe.gn)([nt.o,(0,xe.w6)("design:paramtypes",[i_.eN,oe.b])],Q);var $=c(65862),w=c(18001),l_=c(93614),m=c(90070),Y=c(97161);class ze{constructor(){this.kmsProviders=["vault"],this.authMethods=["token","agent"],this.secretEngines=["kv","transit"],this.sse_s3="AES256",this.sse_kms="aws:kms"}}var ie=(()=>{return(t=ie||(ie={})).ENABLED="Enabled",t.DISABLED="Disabled",ie;var t})(),se=(()=>{return(t=se||(se={})).ENABLED="Enabled",t.SUSPENDED="Suspended",se;var t})(),ae=c(62862),j=c(18372),X=c(60312),B=c(30839),k=c(87925),q=c(94276),z=c(56310),H=c(41582);function it(t,n){1&t&&(e.TgZ(0,"option",29),e.SDv(1,30),e.qZA()),2&t&&e.Q6J("ngValue",null)}function st(t,n){if(1&t&&(e.TgZ(0,"option",31),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function at(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,33),e.qZA())}function lt(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",22),e.SDv(3,23),e.qZA(),e.TgZ(4,"div",24)(5,"select",25),e.YNc(6,it,2,1,"option",26),e.YNc(7,st,2,2,"option",27),e.qZA(),e.YNc(8,at,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(6),e.Q6J("ngIf",null!==_.kmsProviders),e.xp6(1),e.Q6J("ngForOf",_.kmsProviders),e.xp6(1),e.Q6J("ngIf",_.configForm.showError("kms_provider",o,"required"))}}function rt(t,n){if(1&t&&(e.TgZ(0,"option",31),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function ct(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,37),e.qZA())}function dt(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",34),e.SDv(3,35),e.qZA(),e.TgZ(4,"div",24)(5,"select",36),e.YNc(6,rt,2,2,"option",27),e.qZA(),e.YNc(7,ct,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(6),e.Q6J("ngForOf",_.authMethods),e.xp6(1),e.Q6J("ngIf",_.configForm.showError("auth_method",o,"required"))}}function ut(t,n){if(1&t&&(e.TgZ(0,"option",31),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function gt(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,41),e.qZA())}function Rt(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",38),e.SDv(3,39),e.qZA(),e.TgZ(4,"div",24)(5,"select",40),e.YNc(6,ut,2,2,"option",27),e.qZA(),e.YNc(7,gt,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(6),e.Q6J("ngForOf",_.secretEngines),e.xp6(1),e.Q6J("ngIf",_.configForm.showError("secret_engine",o,"required"))}}function Tt(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,45),e.qZA())}function Et(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",42),e.SDv(3,43),e.qZA(),e.TgZ(4,"div",24),e._UZ(5,"input",44),e.YNc(6,Tt,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(6),e.Q6J("ngIf",_.configForm.showError("secret_path",o,"required"))}}function ft(t,n){1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",46),e.SDv(3,47),e.qZA(),e.TgZ(4,"div",24),e._UZ(5,"input",48),e.qZA()()())}function 
pt(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,52),e.qZA())}function mt(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",7)(2,"label",49),e.SDv(3,50),e.qZA(),e.TgZ(4,"div",24),e._UZ(5,"input",51),e.YNc(6,pt,2,0,"span",28),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(6),e.Q6J("ngIf",_.configForm.showError("address",o,"required"))}}function Mt(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,57),e.qZA())}function St(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",7)(1,"label",53)(2,"span"),e.SDv(3,54),e.qZA(),e.TgZ(4,"cd-helper"),e.SDv(5,55),e.qZA()(),e.TgZ(6,"div",24)(7,"input",56),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.fileUpload(i.target.files,"token"))}),e.qZA(),e.YNc(8,Mt,2,0,"span",28),e.qZA()()}if(2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(8),e.Q6J("ngIf",_.configForm.showError("token",o,"required"))}}function Ct(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,62),e.qZA())}function Ot(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",7)(2,"label",58)(3,"span"),e.SDv(4,59),e.qZA(),e.TgZ(5,"cd-helper"),e.SDv(6,60),e.qZA()(),e.TgZ(7,"div",24)(8,"input",61),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.fileUpload(i.target.files,"ssl_cert"))}),e.qZA(),e.YNc(9,Ct,2,0,"span",28),e.qZA()()()}if(2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(9),e.Q6J("ngIf",_.configForm.showError("ssl_cert",o,"required"))}}function Ft(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,67),e.qZA())}function Pt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",7)(2,"label",63)(3,"span"),e.SDv(4,64),e.qZA(),e.TgZ(5,"cd-helper"),e.SDv(6,65),e.qZA()(),e.TgZ(7,"div",24)(8,"input",66),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.fileUpload(i.target.files,"client_cert"))}),e.qZA(),e.YNc(9,Ft,2,0,"span",28),e.qZA()()()}if(2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(9),e.Q6J("ngIf",_.configForm.showError("client_cert",o,"required"))}}function Nt(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,72),e.qZA())}function Gt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",7)(2,"label",68)(3,"span"),e.SDv(4,69),e.qZA(),e.TgZ(5,"cd-helper"),e.SDv(6,70),e.qZA()(),e.TgZ(7,"div",24)(8,"input",71),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.fileUpload(i.target.files,"client_key"))}),e.qZA(),e.YNc(9,Nt,2,0,"span",28),e.qZA()()()}if(2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(9),e.Q6J("ngIf",_.configForm.showError("client_key",o,"required"))}}let At=(()=>{class t{constructor(_,o,i,s,l,r,d){this.formBuilder=_,this.activeModal=o,this.router=i,this.actionLabels=s,this.rgwBucketService=l,this.rgwEncryptionModal=r,this.notificationService=d,this.vaultAddress=/^((https?:\/\/)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+.\d+.\d+)):\d{4}$/,this.submitAction=new e.vpe,this.createForm()}ngOnInit(){this.kmsProviders=this.rgwEncryptionModal.kmsProviders,this.authMethods=this.rgwEncryptionModal.authMethods,this.secretEngines=this.rgwEncryptionModal.secretEngines}createForm(){this.configForm=this.formBuilder.group({address:[null,[a.kI.required,m.h.custom("vaultPattern",_=>!E().isEmpty(_)&&!this.vaultAddress.test(_))]],kms_provider:["vault",a.kI.required],encryptionType:["aws:kms",a.kI.required],auth_method:["token",a.kI.required],secret_engine:["kv",a.kI.required],secret_path:["/"],namespace:[null],token:[null,[m.h.requiredIf({auth_method:"token"})]],ssl_cert:[null,m.h.sslCert()],client_cert:[null,m.h.pemCert()],client_key:[null,m.h.sslPrivKey()],kmsEnabled:[{value:!1}],s3Enabled:[{value:!1}]})}fileUpload(_,o){const i=_[0];(new FileReader).addEventListener("load",()=>{const 
l=this.configForm.get(o);l.setValue(i),l.markAsDirty(),l.markAsTouched(),l.updateValueAndValidity()})}onSubmit(){const _=this.configForm.value;this.rgwBucketService.setEncryptionConfig(_.encryptionType,_.kms_provider,_.auth_method,_.secret_engine,_.secret_path,_.namespace,_.address,_.token,_.owner,_.ssl_cert,_.client_cert,_.client_key).subscribe({next:()=>{this.notificationService.show(w.k.success,"Updated RGW Encryption Configuration values")},error:o=>{this.notificationService.show(w.k.error,o),this.configForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.router.routeReuseStrategy.shouldReuseRoute=()=>!1,this.router.onSameUrlNavigation="reload",this.router.navigate([this.router.url])}})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ae.O),e.Y36(G.Kz),e.Y36(J.F0),e.Y36(I.p4),e.Y36(Ee.o),e.Y36(ze),e.Y36(Y.g))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-config-modal"]],outputs:{submitAction:"submitAction"},features:[e._Bn([ze])],decls:30,vars:14,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y,f,P,N,te;return n="Update RGW Encryption Configurations",_="Encryption Type",o="SSE-S3 Encryption",i="SSE-KMS Encryption",s="Key management service provider",l="-- Select a provider --",r="This field is required.",d="Authentication Method",u="This field is required.",R="Secret Engine",O="This field is required.",F="Secret Path ",b="This field is required.",h="Namespace ",M="Vault Address ",L="This field is required.",S="Token",W=" The token authentication method expects a Vault token to be present in a plaintext file. ",C="This field is required.",Z="CA Certificate",D="The SSL certificate in PEM format.",U="This field is required.",v="Client Certificate",y="The Client certificate in PEM format.",f="This field is required.",P="Client Private Key",N="The Client Private Key in PEM format.",te="This field is required.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","configForm",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","encryptionType",1,"cd-col-form-label","required"],_,[1,"col-md-auto","custom-checkbox","form-check-inline","ms-3"],["formControlName","encryptionType","id","s3Enabled","type","radio","name","encryptionType","value","AES256",1,"form-check-input"],["for","s3Enabled",1,"custom-check-label"],o,[1,"col-md-auto","custom-checkbox","form-check-inline"],["formControlName","encryptionType","id","kmsEnabled","name","encryptionType","value","aws:kms","type","radio",1,"form-check-input"],["for","kmsEnabled",1,"custom-check-label"],i,[4,"ngIf"],["class","form-group 
row",4,"ngIf"],[1,"modal-footer"],[3,"submitText","form","submitActionEvent"],["for","kms_provider",1,"cd-col-form-label","required"],s,[1,"cd-col-form-input"],["id","kms_provider","name","kms_provider","formControlName","kms_provider",1,"form-select"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],["class","invalid-feedback",4,"ngIf"],[3,"ngValue"],l,[3,"value"],[1,"invalid-feedback"],r,["for","auth_method",1,"cd-col-form-label","required"],d,["id","auth_method","name","auth_method","formControlName","auth_method",1,"form-select"],u,["for","secret_engine",1,"cd-col-form-label","required"],R,["id","secret_engine","name","secret_engine","formControlName","secret_engine",1,"form-select"],O,["for","secret_path",1,"cd-col-form-label"],F,["id","secret_path","name","secret_path","type","text","formControlName","secret_path",1,"form-control"],b,["for","namespace",1,"cd-col-form-label"],h,["id","namespace","name","namespace","type","text","formControlName","namespace",1,"form-control"],["for","address",1,"cd-col-form-label","required"],M,["id","address","name","address","formControlName","address","placeholder","http://127.0.0.1:8000",1,"form-control"],L,["for","token",1,"cd-col-form-label","required"],S,W,["type","file","formControlName","token",3,"change"],C,["for","ssl_cert",1,"cd-col-form-label"],Z,D,["type","file","formControlName","ssl_cert",3,"change"],U,["for","client_cert",1,"cd-col-form-label"],v,y,["type","file","formControlName","client_cert",3,"change"],f,["for","client_key",1,"cd-col-form-label"],P,N,["type","file",3,"change"],te]},template:function(_,o){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"div",7)(8,"label",8),e.SDv(9,9),e.qZA(),e.TgZ(10,"div",10),e._UZ(11,"input",11),e.TgZ(12,"label",12),e.SDv(13,13),e.qZA()(),e.TgZ(14,"div",14),e._UZ(15,"input",15),e.TgZ(16,"label",16),e.SDv(17,17),e.qZA()()(),e.YNc(18,lt,9,3,"div",18),e.YNc(19,dt,8,2,"div",18),e.YNc(20,Rt,8,2,"div",18),e.YNc(21,Et,7,1,"div",18),e.YNc(22,ft,6,0,"div",18),e.YNc(23,mt,7,1,"div",18),e.YNc(24,St,9,1,"div",19),e.YNc(25,Ot,10,1,"div",18),e.YNc(26,Pt,10,1,"div",18),e.YNc(27,Gt,10,1,"div",18),e.qZA(),e.TgZ(28,"div",20)(29,"cd-form-button-panel",21),e.NdJ("submitActionEvent",function(){return 
o.onSubmit()}),e.qZA()()(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.configForm),e.xp6(14),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","token"===o.configForm.getValue("auth_method")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(1),e.Q6J("ngIf","aws:kms"===o.configForm.getValue("encryptionType")||"AES256"===o.configForm.getValue("encryptionType")),e.xp6(2),e.Q6J("submitText",o.actionLabels.SUBMIT)("form",o.configForm))},dependencies:[T.sg,T.O5,j.S,X.z,B.p,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.EJ,a._,a.JJ,a.JL,a.sg,a.u]}),t})();var ee=c(63285),fe=c(82945),r_=c(63622),_e=c(10545);function It(t,n){1&t&&(e.TgZ(0,"div",9)(1,"label",42),e.SDv(2,43),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",44),e.qZA()())}function bt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,46),e.qZA())}function ht(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,47),e.qZA())}function Lt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,48),e.qZA())}function Wt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,49),e.qZA())}function $t(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,50),e.qZA())}function Zt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,51),e.qZA())}function Dt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,52),e.qZA())}function Ut(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,53),e.qZA())}function vt(t,n){1&t&&(e.TgZ(0,"option",54),e.SDv(1,55),e.qZA()),2&t&&e.Q6J("ngValue",null)}function yt(t,n){1&t&&(e.TgZ(0,"option",54),e.SDv(1,56),e.qZA()),2&t&&e.Q6J("ngValue",null)}function wt(t,n){if(1&t&&(e.TgZ(0,"option",57),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function xt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,58),e.qZA())}function kt(t,n){1&t&&(e.TgZ(0,"option",54),e.SDv(1,60),e.qZA()),2&t&&e.Q6J("ngValue",null)}function zt(t,n){1&t&&(e.TgZ(0,"option",54),e.SDv(1,61),e.qZA()),2&t&&e.Q6J("ngValue",null)}function qt(t,n){if(1&t&&(e.TgZ(0,"option",57),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_.name),e.xp6(1),e.Oqu(_.description)}}function Ht(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,62),e.qZA())}function Xt(t,n){if(1&t&&(e.TgZ(0,"select",59),e.YNc(1,kt,2,1,"option",18),e.YNc(2,zt,2,1,"option",18),e.YNc(3,qt,2,2,"option",19),e.qZA(),e.YNc(4,Ht,2,0,"span",14)),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("ngIf",null===o.placementTargets),e.xp6(1),e.Q6J("ngIf",null!==o.placementTargets),e.xp6(1),e.Q6J("ngForOf",o.placementTargets),e.xp6(1),e.Q6J("ngIf",o.bucketForm.showError("placement-target",_,"required"))}}function Bt(t,n){1&t&&(e.ynx(0),e._UZ(1,"input",63),e.BQk())}function Qt(t,n){if(1&t){const 
_=e.EpF();e.TgZ(0,"fieldset")(1,"legend",25),e.SDv(2,64),e.qZA(),e.TgZ(3,"div",9)(4,"div",27)(5,"div",28)(6,"input",65),e.NdJ("change",function(){e.CHM(_);const i=e.oxw(2);return e.KtG(i.setMfaDeleteValidators())}),e.qZA(),e.TgZ(7,"label",66),e.SDv(8,67),e.qZA(),e.TgZ(9,"cd-helper")(10,"span"),e.SDv(11,68),e.qZA()()()()()()}}function Yt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,77),e.qZA())}function Jt(t,n){if(1&t&&(e.TgZ(0,"div",9)(1,"label",74),e.SDv(2,75),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",76),e.YNc(5,Yt,2,0,"span",14),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.bucketForm.showError("mfa-token-serial",_,"required"))}}function Kt(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,81),e.qZA())}function Vt(t,n){if(1&t&&(e.TgZ(0,"div",9)(1,"label",78),e.SDv(2,79),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"input",80),e.YNc(5,Kt,2,0,"span",14),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.bucketForm.showError("mfa-token-pin",_,"required"))}}function jt(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend",25),e.SDv(2,69),e.qZA(),e.TgZ(3,"div",9)(4,"div",27)(5,"div",28)(6,"input",70),e.NdJ("change",function(){e.CHM(_);const i=e.oxw(2);return e.KtG(i.setMfaDeleteValidators())}),e.qZA(),e.TgZ(7,"label",71),e.SDv(8,72),e.qZA(),e.TgZ(9,"cd-helper")(10,"span"),e.SDv(11,73),e.qZA()()()()(),e.YNc(12,Jt,6,1,"div",8),e.YNc(13,Vt,6,1,"div",8),e.qZA()}if(2&t){const _=e.oxw(2);e.xp6(12),e.Q6J("ngIf",_.areMfaCredentialsRequired()),e.xp6(1),e.Q6J("ngIf",_.areMfaCredentialsRequired())}}function eo(t,n){1&t&&(e.TgZ(0,"div",9)(1,"label",82),e.SDv(2,83),e.qZA(),e.TgZ(3,"div",12)(4,"select",84)(5,"option",85),e.SDv(6,86),e.qZA(),e.TgZ(7,"option",87),e.SDv(8,88),e.qZA()()()())}function _o(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,93),e.qZA())}function to(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,94),e.qZA())}function oo(t,n){if(1&t&&(e.TgZ(0,"div",9)(1,"label",89),e.ynx(2),e.SDv(3,90),e.BQk(),e.TgZ(4,"cd-helper"),e.SDv(5,91),e.qZA()(),e.TgZ(6,"div",12),e._UZ(7,"input",92),e.YNc(8,_o,2,0,"span",14),e.YNc(9,to,2,0,"span",14),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(8),e.Q6J("ngIf",o.bucketForm.showError("lock_retention_period_days",_,"pattern")),e.xp6(1),e.Q6J("ngIf",o.bucketForm.showError("lock_retention_period_days",_,"lockDays"))}}function no(t,n){1&t&&(e.TgZ(0,"option",54),e.SDv(1,105),e.qZA()),2&t&&e.Q6J("ngValue",null)}function io(t,n){if(1&t&&(e.TgZ(0,"option",57),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function so(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,106),e.qZA())}function ao(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",9)(2,"label",102),e.SDv(3,103),e.qZA(),e.TgZ(4,"div",12)(5,"select",104),e.YNc(6,no,2,1,"option",18),e.YNc(7,io,2,2,"option",19),e.qZA(),e.YNc(8,so,2,0,"span",14),e.qZA()()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("autofocus",o.editing),e.xp6(1),e.Q6J("ngIf",null!==o.kmsProviders),e.xp6(1),e.Q6J("ngForOf",o.kmsProviders),e.xp6(1),e.Q6J("ngIf",o.bucketForm.showError("kms_provider",_,"required"))}}function lo(t,n){1&t&&(e.TgZ(0,"span",45),e.SDv(1,110),e.qZA())}function ro(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",9)(2,"label",107),e.SDv(3,108),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",109),e.YNc(6,lo,2,0,"span",14),e.qZA()()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(6),e.Q6J("ngIf",o.bucketForm.showError("keyId",_,"required"))}}function 
co(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",9)(2,"div",27)(3,"div",95),e._UZ(4,"input",96),e.TgZ(5,"label",97),e.SDv(6,98),e.qZA()()()(),e.TgZ(7,"div",9)(8,"div",27)(9,"div",95),e._UZ(10,"input",99),e.TgZ(11,"label",100),e.SDv(12,101),e.qZA()()()(),e.YNc(13,ao,9,4,"div",24),e.YNc(14,ro,7,1,"div",24),e.qZA()),2&t){const _=e.oxw(2);e.xp6(4),e.uIk("disabled",!_.s3VaultConfig||null),e.xp6(6),e.uIk("disabled",!_.kmsVaultConfig||null),e.xp6(3),e.Q6J("ngIf","aws:kms"===_.bucketForm.getValue("encryption_type")),e.xp6(1),e.Q6J("ngIf","aws:kms"===_.bucketForm.getValue("encryption_type"))}}const c_=function(t){return{required:t}};function uo(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7),e.YNc(9,It,5,0,"div",8),e.TgZ(10,"div",9)(11,"label",10),e.SDv(12,11),e.qZA(),e.TgZ(13,"div",12),e._UZ(14,"input",13),e.YNc(15,bt,2,0,"span",14),e.YNc(16,ht,2,0,"span",14),e.YNc(17,Lt,2,0,"span",14),e.YNc(18,Wt,2,0,"span",14),e.YNc(19,$t,2,0,"span",14),e.YNc(20,Zt,2,0,"span",14),e.YNc(21,Dt,2,0,"span",14),e.YNc(22,Ut,2,0,"span",14),e.qZA()(),e.TgZ(23,"div",9)(24,"label",15),e.SDv(25,16),e.qZA(),e.TgZ(26,"div",12)(27,"select",17),e.YNc(28,vt,2,1,"option",18),e.YNc(29,yt,2,1,"option",18),e.YNc(30,wt,2,2,"option",19),e.qZA(),e.YNc(31,xt,2,0,"span",14),e.qZA()(),e.TgZ(32,"div",9)(33,"label",20),e.SDv(34,21),e.qZA(),e.TgZ(35,"div",12),e.YNc(36,Xt,5,4,"ng-template",null,22,e.W1O),e.YNc(38,Bt,2,0,"ng-container",23),e.qZA()(),e.YNc(39,Qt,12,0,"fieldset",24),e.YNc(40,jt,14,2,"fieldset",24),e.TgZ(41,"fieldset")(42,"legend",25),e.SDv(43,26),e.qZA(),e.TgZ(44,"div",9)(45,"div",27)(46,"div",28),e._UZ(47,"input",29),e.TgZ(48,"label",30),e.SDv(49,31),e.qZA(),e.TgZ(50,"cd-helper")(51,"span"),e.SDv(52,32),e.qZA()()()()(),e.YNc(53,eo,9,0,"div",8),e.YNc(54,oo,10,2,"div",8),e.qZA(),e.TgZ(55,"fieldset")(56,"legend",25),e.SDv(57,33),e.qZA(),e.TgZ(58,"div",9)(59,"div",27)(60,"div",28),e._UZ(61,"input",34),e.TgZ(62,"label",35),e.SDv(63,36),e.qZA(),e.TgZ(64,"cd-helper",37)(65,"span"),e.tHW(66,38),e.TgZ(67,"a",39),e.NdJ("click",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.openConfigModal())}),e.qZA(),e.N_p(),e.qZA()()()()(),e.YNc(68,co,15,4,"div",24),e.qZA()(),e.TgZ(69,"div",40)(70,"cd-form-button-panel",41),e.NdJ("submitActionEvent",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.submit())}),e.ALo(71,"titlecase"),e.ALo(72,"upperFirst"),e.qZA()()()()()}if(2&t){const 
_=e.MAs(2),o=e.MAs(37),i=e.oxw();e.xp6(1),e.Q6J("formGroup",i.bucketForm),e.xp6(6),e.pQV(e.lcZ(6,31,i.action))(e.lcZ(7,33,i.resource)),e.QtT(5),e.xp6(2),e.Q6J("ngIf",i.editing),e.xp6(2),e.Q6J("ngClass",e.VKq(39,c_,!i.editing)),e.xp6(3),e.Q6J("readonly",i.editing)("autofocus",!i.editing),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"required")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameInvalid")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"bucketNameNotAllowed")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"containsUpperCase")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"lowerCaseOrNumber")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"ipAddress")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"onlyLowerCaseAndNumbers")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("bid",_,"shouldBeInRange")),e.xp6(5),e.Q6J("autofocus",i.editing),e.xp6(1),e.Q6J("ngIf",null===i.owners),e.xp6(1),e.Q6J("ngIf",null!==i.owners),e.xp6(1),e.Q6J("ngForOf",i.owners),e.xp6(1),e.Q6J("ngIf",i.bucketForm.showError("owner",_,"required")),e.xp6(2),e.Q6J("ngClass",e.VKq(41,c_,!i.editing)),e.xp6(5),e.Q6J("ngIf",i.editing)("ngIfElse",o),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(1),e.Q6J("ngIf",i.editing),e.xp6(13),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(1),e.Q6J("ngIf",i.bucketForm.getValue("lock_enabled")),e.xp6(7),e.uIk("disabled",!i.kmsVaultConfig&&!i.s3VaultConfig||null),e.xp6(7),e.Q6J("ngIf",i.bucketForm.getValue("encryption_enabled")),e.xp6(2),e.Q6J("form",i.bucketForm)("submitText",e.lcZ(71,35,i.action)+" "+e.lcZ(72,37,i.resource))}}let d_=(()=>{class t extends l_.E{get isVersioningEnabled(){return this.bucketForm.getValue("versioning")}get isMfaDeleteEnabled(){return this.bucketForm.getValue("mfa-delete")}constructor(_,o,i,s,l,r,d,u,R,O,F){super(),this.route=_,this.router=o,this.formBuilder=i,this.rgwBucketService=s,this.rgwSiteService=l,this.modalService=r,this.rgwUserService=d,this.notificationService=u,this.rgwEncryptionModal=R,this.actionLabels=O,this.changeDetectorRef=F,this.editing=!1,this.owners=null,this.kmsProviders=null,this.placementTargets=[],this.isVersioningAlreadyEnabled=!1,this.isMfaDeleteAlreadyEnabled=!1,this.icons=$.P,this.kmsVaultConfig=!1,this.s3VaultConfig=!1,this.editing=this.router.url.startsWith(`/rgw/bucket/${I.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="bucket",this.createForm()}ngAfterViewChecked(){this.changeDetectorRef.detectChanges()}createForm(){const _=this,o=m.h.custom("lockDays",()=>{if(!_.bucketForm||!E().get(_.bucketForm.getRawValue(),"lock_enabled"))return!1;const i=Number(_.bucketForm.getValue("lock_retention_period_days"));return!Number.isInteger(i)||0===i});this.bucketForm=this.formBuilder.group({id:[null],bid:[null,[a.kI.required],this.editing?[]:[m.h.bucketName(),m.h.bucketExistence(!1,this.rgwBucketService)]],owner:[null,[a.kI.required]],kms_provider:["vault"],"placement-target":[null,this.editing?[]:[a.kI.required]],versioning:[null],"mfa-delete":[null],"mfa-token-serial":[""],"mfa-token-pin":[""],lock_enabled:[{value:!1,disabled:this.editing}],encryption_enabled:[null],encryption_type:[null,[m.h.requiredIf({encryption_enabled:!0})]],keyId:[null,[m.h.requiredIf({encryption_type:"aws:kms",encryption_enabled:!0})]],lock_mode:["COMPLIANCE"],lock_retention_period_days:[0,[m.h.number(!1),o]]})}ngOnInit(){const 
_={owners:this.rgwUserService.enumerate()};this.kmsProviders=this.rgwEncryptionModal.kmsProviders,this.rgwBucketService.getEncryptionConfig().subscribe(o=>{this.kmsVaultConfig=o[0],this.s3VaultConfig=o[1],this.kmsVaultConfig&&this.s3VaultConfig?this.bucketForm.get("encryption_type").setValue(""):this.kmsVaultConfig?this.bucketForm.get("encryption_type").setValue("aws:kms"):this.s3VaultConfig?this.bucketForm.get("encryption_type").setValue("AES256"):this.bucketForm.get("encryption_type").setValue("")}),this.editing||(_.getPlacementTargets=this.rgwSiteService.get("placement-targets")),this.route.params.subscribe(o=>{if(o.hasOwnProperty("bid")){const i=decodeURIComponent(o.bid);_.getBid=this.rgwBucketService.get(i)}(0,ne.D)(_).subscribe(i=>{if(this.owners=i.owners.sort(),i.getPlacementTargets){const s=i.getPlacementTargets;this.zonegroup=s.zonegroup,E().forEach(s.placement_targets,l=>{l.description=`${l.name} (${"pool"}: ${l.data_pool})`,this.placementTargets.push(l)}),1===this.placementTargets.length&&this.bucketForm.get("placement-target").setValue(this.placementTargets[0].name)}if(i.getBid){const s=i.getBid,l=E().clone(this.bucketForm.getRawValue());let r=E().pick(s,E().keys(l));r.lock_retention_period_days=this.rgwBucketService.getLockDays(s),r["placement-target"]=s.placement_rule,r.versioning=s.versioning===se.ENABLED,r["mfa-delete"]=s.mfa_delete===ie.ENABLED,r.encryption_enabled="Enabled"===s.encryption,r=E().merge(l,r),this.bucketForm.setValue(r),this.editing&&(this.isVersioningAlreadyEnabled=this.isVersioningEnabled,this.isMfaDeleteAlreadyEnabled=this.isMfaDeleteEnabled,this.setMfaDeleteValidators(),r.lock_enabled&&this.bucketForm.controls.versioning.disable())}this.loadingReady()})})}goToListView(){this.router.navigate(["/rgw/bucket"])}submit(){if(null==this.bucketForm.getValue("encryption_enabled")&&(this.bucketForm.get("encryption_enabled").setValue(!1),this.bucketForm.get("encryption_type").setValue(null)),this.bucketForm.pristine)return void this.goToListView();const _=this.bucketForm.value;if(this.editing){const o=this.getVersioningStatus(),i=this.getMfaDeleteStatus();this.rgwBucketService.update(_.bid,_.id,_.owner,o,_.encryption_enabled,_.encryption_type,_.keyId,i,_["mfa-token-serial"],_["mfa-token-pin"],_.lock_mode,_.lock_retention_period_days).subscribe(()=>{this.notificationService.show(w.k.success,"Updated Object Gateway bucket '" + _.bid + "'."),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}else this.rgwBucketService.create(_.bid,_.owner,this.zonegroup,_["placement-target"],_.lock_enabled,_.lock_mode,_.lock_retention_period_days,_.encryption_enabled,_.encryption_type,_.keyId).subscribe(()=>{this.notificationService.show(w.k.success,"Created Object Gateway bucket '" + _.bid + "'"),this.goToListView()},()=>{this.bucketForm.setErrors({cdSubmitButton:!0})})}areMfaCredentialsRequired(){return this.isMfaDeleteEnabled!==this.isMfaDeleteAlreadyEnabled||this.isMfaDeleteAlreadyEnabled&&this.isVersioningEnabled!==this.isVersioningAlreadyEnabled}setMfaDeleteValidators(){const _=this.bucketForm.get("mfa-token-serial"),o=this.bucketForm.get("mfa-token-pin");this.areMfaCredentialsRequired()?(_.setValidators(a.kI.required),o.setValidators(a.kI.required)):(_.setValidators(null),o.setValidators(null)),_.updateValueAndValidity(),o.updateValueAndValidity()}getVersioningStatus(){return this.isVersioningEnabled?se.ENABLED:se.SUSPENDED}getMfaDeleteStatus(){return this.isMfaDeleteEnabled?ie.ENABLED:ie.DISABLED}fileUpload(_,o){const i=_[0];(new 
FileReader).addEventListener("load",()=>{const l=this.bucketForm.get(o);l.setValue(i),l.markAsDirty(),l.markAsTouched(),l.updateValueAndValidity()})}openConfigModal(){this.modalService.show(At,null,{size:"lg"}).componentInstance.configForm.get("encryptionType").setValue(this.bucketForm.getValue("encryption_type")||"AES256")}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(J.gz),e.Y36(J.F0),e.Y36(ae.O),e.Y36(Ee.o),e.Y36(n_.I),e.Y36(ee.Z),e.Y36(Q),e.Y36(Y.g),e.Y36(ze),e.Y36(I.p4),e.Y36(e.sBO))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-form"]],features:[e._Bn([ze]),e.qOj],decls:1,vars:1,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y,f,P,N,te,A,Me,Se,Ce,Oe,Fe,Pe,Ne,Ge,Ae,Ie,be,he,Le,We,$e,Ze,De,Ue,ve,ye,we;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Name...",i="Owner",s="Placement target",l="Locking",r="Enabled",d="Enables locking for the objects in the bucket. Locking can only be enabled while creating a bucket.",u="Security",R="Encryption",O="Enables encryption for the objects in the bucket. To enable encryption on a bucket you need to set the configuration values for SSE-S3 or SSE-KMS. To set the configuration values " + "\ufffd#67\ufffd" + "Click here" + "\ufffd/#67\ufffd" + "",F="Id",b="This field is required.",h="Bucket names can only contain lowercase letters, numbers, periods and hyphens.",M="The chosen name is already in use.",L="Bucket names must not contain uppercase characters or underscores.",S="Each label must start and end with a lowercase letter or a number.",W="Bucket names cannot be formatted as IP address.",C="Bucket labels cannot be empty and can only contain lowercase letters, numbers and hyphens.",Z="Bucket names must be 3 to 63 characters long.",D="Loading...",U="-- Select a user --",v="This field is required.",y="Loading...",f="-- Select a placement target --",P="This field is required.",N="Versioning",te="Enabled",A="Enables versioning for the objects in the bucket.",Me="Multi-Factor Authentication",Se="Delete enabled",Ce="Enables MFA (multi-factor authentication) Delete, which requires additional authentication for changing the bucket versioning state.",Oe="Token Serial Number",Fe="This field is required.",Pe="Token PIN",Ne="This field is required.",Ge="Mode",Ae="Compliance",Ie="Governance",be="Days",he="The number of days that you want to specify for the default retention period that will be applied to new objects placed in this bucket.",Le="The entered value must be a positive integer.",We="Retention Days must be a positive integer.",$e="SSE-S3 Encryption",Ze="Connect to an external key management service",De="KMS Provider",Ue="-- Select a provider --",ve="This field is required.",ye="Key Id ",we="This field is required.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","bucketForm","novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],n,[1,"card-body"],["class","form-group 
row",4,"ngIf"],[1,"form-group","row"],["for","bid",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","bid","name","bid","type","text","placeholder",o,"formControlName","bid",1,"form-control",3,"readonly","autofocus"],["class","invalid-feedback",4,"ngIf"],["for","owner",1,"cd-col-form-label","required"],i,["id","owner","name","owner","formControlName","owner",1,"form-select",3,"autofocus"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],["for","placement-target",1,"cd-col-form-label",3,"ngClass"],s,["placementTargetSelect",""],[4,"ngIf","ngIfElse"],[4,"ngIf"],[1,"cd-header"],l,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","lock_enabled","formControlName","lock_enabled","type","checkbox",1,"custom-control-input"],["for","lock_enabled",1,"custom-control-label"],r,d,u,["id","encryption_enabled","name","encryption_enabled","formControlName","encryption_enabled","type","checkbox",1,"form-check-input"],["for","encryption_enabled",1,"form-check-label"],R,["aria-label","toggle encryption helper"],O,["href","#/rgw/bucket/create","aria-label","click here",3,"click"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],["for","id",1,"cd-col-form-label"],F,["id","id","name","id","type","text","formControlName","id","readonly","",1,"form-control"],[1,"invalid-feedback"],b,h,M,L,S,W,C,Z,[3,"ngValue"],D,U,[3,"value"],v,["id","placement-target","name","placement-target","formControlName","placement-target",1,"form-select"],y,f,P,["id","placement-target","name","placement-target","formControlName","placement-target","type","text","readonly","",1,"form-control"],N,["type","checkbox","id","versioning","name","versioning","formControlName","versioning",1,"custom-control-input",3,"change"],["for","versioning",1,"custom-control-label"],te,A,Me,["type","checkbox","id","mfa-delete","name","mfa-delete","formControlName","mfa-delete",1,"custom-control-input",3,"change"],["for","mfa-delete",1,"custom-control-label"],Se,Ce,["for","mfa-token-serial",1,"cd-col-form-label"],Oe,["type","text","id","mfa-token-serial","name","mfa-token-serial","formControlName","mfa-token-serial",1,"form-control"],Fe,["for","mfa-token-pin",1,"cd-col-form-label"],Pe,["type","text","id","mfa-token-pin","name","mfa-token-pin","formControlName","mfa-token-pin",1,"form-control"],Ne,["for","lock_mode",1,"cd-col-form-label"],Ge,["formControlName","lock_mode","name","lock_mode","id","lock_mode",1,"form-select"],["value","COMPLIANCE"],Ae,["value","GOVERNANCE"],Ie,["for","lock_retention_period_days",1,"cd-col-form-label"],be,he,["type","number","id","lock_retention_period_days","formControlName","lock_retention_period_days","min","0",1,"form-control"],Le,We,[1,"custom-control","custom-radio","custom-control-inline","ps-5"],["formControlName","encryption_type","id","sse_S3_enabled","type","radio","name","encryption_type","value","AES256",1,"form-check-input"],["for","sse_S3_enabled",1,"form-control-label"],$e,["formControlName","encryption_type","id","kms_enabled","name","encryption_type","value","aws:kms","type","radio",1,"form-check-input"],["for","kms_enabled",1,"form-control-label"],Ze,["for","kms_provider",1,"cd-col-form-label","required"],De,["id","kms_provider","name","kms_provider","formControlName","kms_provider",1,"form-select",3,"autofocus"],Ue,ve,["for","keyId",1,"cd-col-form-label","required"],ye,["id","keyId","name","keyId","type","text","formControlName","keyId",1,"form-control"],we]},template:function(_,o){1&_&&e.YNc(0,uo,73,43,"div",0),2&_&&e.Q6J("cdF
ormLoading",o.loading)},dependencies:[T.mk,T.sg,T.O5,j.S,B.p,fe.U,r_.y,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.wV,a.Wl,a.EJ,a._,a.JJ,a.JL,a.qQ,a.sg,a.u,T.rS,_e.m]}),t})();var u_=c(70882),Be=c(68136),Qe=c(30982),le=c(59019),qe=c(68774),Ye=c(47557),g_=c(66369),re=c(51847),ce=c(47640),R_=c(60251),Je=c(94928),T_=c(68962),go=c(96102);function Ro(t,n){1&t&&(e.TgZ(0,"td"),e.SDv(1,17),e.qZA())}function To(t,n){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimless"),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.selection.bucket_quota.max_size)," ")}}function Eo(t,n){1&t&&(e.TgZ(0,"td"),e.SDv(1,18),e.qZA())}function fo(t,n){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.hij(" ",_.selection.bucket_quota.max_objects," ")}}function po(t,n){if(1&t&&(e.ynx(0),e.TgZ(1,"tr")(2,"td",5),e.SDv(3,15),e.qZA(),e.YNc(4,Ro,2,0,"td",0),e.YNc(5,To,3,3,"td",0),e.qZA(),e.TgZ(6,"tr")(7,"td",5),e.SDv(8,16),e.qZA(),e.YNc(9,Eo,2,0,"td",0),e.YNc(10,fo,2,1,"td",0),e.qZA(),e.BQk()),2&t){const _=e.oxw(2);e.xp6(4),e.Q6J("ngIf",_.selection.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",_.selection.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.selection.bucket_quota.max_objects>-1)}}function mo(t,n){if(1&t&&(e.ynx(0),e.TgZ(1,"tr")(2,"td",5),e.SDv(3,19),e.qZA(),e.TgZ(4,"td"),e._uU(5),e.qZA()(),e.TgZ(6,"tr")(7,"td",5),e.SDv(8,20),e.qZA(),e.TgZ(9,"td"),e._uU(10),e.qZA()(),e.BQk()),2&t){const _=e.oxw(2);e.xp6(5),e.Oqu(_.selection.lock_mode),e.xp6(5),e.Oqu(_.selection.lock_retention_period_days)}}function Mo(t,n){if(1&t&&(e.ynx(0),e.TgZ(1,"table",1)(2,"tbody")(3,"tr")(4,"td",2),e.SDv(5,3),e.qZA(),e.TgZ(6,"td",4),e._uU(7),e.qZA()(),e.TgZ(8,"tr")(9,"td",5),e.SDv(10,6),e.qZA(),e.TgZ(11,"td"),e._uU(12),e.qZA()(),e.TgZ(13,"tr")(14,"td",5),e.SDv(15,7),e.qZA(),e.TgZ(16,"td"),e._uU(17),e.qZA()(),e.TgZ(18,"tr")(19,"td",5),e.SDv(20,8),e.qZA(),e.TgZ(21,"td"),e._uU(22),e.qZA()(),e.TgZ(23,"tr")(24,"td",5),e.SDv(25,9),e.qZA(),e.TgZ(26,"td"),e._uU(27),e.qZA()(),e.TgZ(28,"tr")(29,"td",5),e.SDv(30,10),e.qZA(),e.TgZ(31,"td"),e._uU(32),e.ALo(33,"cdDate"),e.qZA()()()(),e.TgZ(34,"div")(35,"legend"),e.SDv(36,11),e.qZA(),e.TgZ(37,"table",1)(38,"tbody")(39,"tr")(40,"td",2),e.SDv(41,12),e.qZA(),e.TgZ(42,"td",4),e._uU(43),e.ALo(44,"booleanText"),e.qZA()(),e.YNc(45,po,11,4,"ng-container",0),e.qZA()()(),e.TgZ(46,"legend"),e.SDv(47,13),e.qZA(),e.TgZ(48,"table",1)(49,"tbody")(50,"tr")(51,"td",2),e.SDv(52,14),e.qZA(),e.TgZ(53,"td",4),e._uU(54),e.ALo(55,"booleanText"),e.qZA()(),e.YNc(56,mo,11,2,"ng-container",0),e.qZA()(),e.BQk()),2&t){const _=e.oxw();e.xp6(7),e.Oqu(_.selection.versioning),e.xp6(5),e.Oqu(_.selection.encryption),e.xp6(5),e.Oqu(_.selection.mfa_delete),e.xp6(5),e.Oqu(_.selection.index_type),e.xp6(5),e.Oqu(_.selection.placement_rule),e.xp6(5),e.Oqu(e.lcZ(33,10,_.selection.mtime)),e.xp6(11),e.Oqu(e.lcZ(44,12,_.selection.bucket_quota.enabled)),e.xp6(2),e.Q6J("ngIf",_.selection.bucket_quota.enabled),e.xp6(9),e.Oqu(e.lcZ(55,14,_.selection.lock_enabled)),e.xp6(2),e.Q6J("ngIf",_.selection.lock_enabled)}}let So=(()=>{class t{constructor(_){this.rgwBucketService=_}ngOnChanges(){this.selection&&this.rgwBucketService.get(this.selection.bid).subscribe(_=>{_.lock_retention_period_days=this.rgwBucketService.getLockDays(_),this.selection=_})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Ee.o))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let 
n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L;return n="Versioning",_="Encryption",o="MFA Delete",i="Index type",s="Placement rule",l="Last modification time",r="Bucket quota",d="Enabled",u="Locking",R="Enabled",O="Maximum size",F="Maximum objects",b="Unlimited",h="Unlimited",M="Mode",L="Days",[[4,"ngIf"],[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],n,[1,"w-75"],[1,"bold"],_,o,i,s,l,r,d,u,R,O,F,b,h,M,L]},template:function(_,o){1&_&&e.YNc(0,Mo,57,16,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},dependencies:[T.O5,T_.T,g_.n,go.N],styles:["table[_ngcontent-%COMP%]{table-layout:fixed}table[_ngcontent-%COMP%] td[_ngcontent-%COMP%]{word-wrap:break-word}"]}),t})();const Co=["bucketSizeTpl"],Oo=["bucketObjectTpl"];function Fo(t,n){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_size)("used",_.bucket_size)}}function Po(t,n){1&t&&e.SDv(0,9)}function No(t,n){if(1&t&&(e.YNc(0,Fo,1,2,"cd-usage-bar",6),e.YNc(1,Po,1,0,"ng-template",null,7,e.W1O)),2&t){const _=n.row,o=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_size>0&&_.bucket_quota.enabled)("ngIfElse",o)}}function Go(t,n){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.bucket_quota.max_objects)("used",_.num_objects)("isBinary",!1)}}function Ao(t,n){1&t&&e.SDv(0,13)}function Io(t,n){if(1&t&&(e.YNc(0,Go,1,3,"cd-usage-bar",10),e.YNc(1,Ao,1,0,"ng-template",null,11,e.W1O)),2&t){const _=n.row,o=e.MAs(2);e.Q6J("ngIf",_.bucket_quota.max_objects>0&&_.bucket_quota.enabled)("ngIfElse",o)}}let ho=(()=>{class t extends Be.o{constructor(_,o,i,s,l,r,d,u){super(u),this.authStorageService=_,this.dimlessBinaryPipe=o,this.dimlessPipe=i,this.rgwBucketService=s,this.modalService=l,this.urlBuilder=r,this.actionLabels=d,this.ngZone=u,this.columns=[],this.buckets=[],this.selection=new qe.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Name",prop:"bid",flexGrow:2},{name:"Owner",prop:"owner",flexGrow:2.5},{name:"Used Capacity",prop:"bucket_size",flexGrow:.6,pipe:this.dimlessBinaryPipe},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.bucketSizeTpl,flexGrow:.8},{name:"Objects",prop:"num_objects",flexGrow:.6,pipe:this.dimlessPipe},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.bucketObjectTpl,flexGrow:.8}];const _=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().bid)}`;this.tableActions=[{permission:"create",icon:$.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:l=>!l.hasSelection},{permission:"update",icon:$.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:$.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:l=>l.hasMultiSelection}],this.setTableRefreshTimeout()}transformBucketData(){E().forEach(this.buckets,_=>{const o=_.bucket_quota.max_size,i=_.bucket_quota.max_objects;_.bucket_size=0,_.num_objects=0,E().isEmpty(_.usage)||(_.bucket_size=_.usage["rgw.main"].size_actual,_.num_objects=_.usage["rgw.main"].num_objects),_.size_usage=o>0?_.bucket_size/o:void 0,_.object_usage=i>0?_.num_objects/i:void 
0})}getBucketList(_){this.setTableRefreshTimeout(),this.rgwBucketService.list(!0).subscribe(o=>{this.buckets=o,this.transformBucketData()},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(Qe.M,{itemDescription:this.selection.hasSingleSelection?"bucket":"buckets",itemNames:this.selection.selected.map(_=>_.bid),submitActionObservable:()=>new u_.y(_=>{(0,ne.D)(this.selection.selected.map(o=>this.rgwBucketService.delete(o.bid))).subscribe({error:o=>{_.error(o),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ce.j),e.Y36(Ye.$),e.Y36(g_.n),e.Y36(Ee.o),e.Y36(ee.Z),e.Y36(re.F),e.Y36(I.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-bucket-list"]],viewQuery:function(_,o){if(1&_&&(e.Gf(le.a,7),e.Gf(Co,7),e.Gf(Oo,7)),2&_){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.bucketSizeTpl=i.first),e.iGM(i=e.CRH())&&(o.bucketObjectTpl=i.first)}},features:[e._Bn([{provide:re.F,useValue:new re.F("rgw/bucket")}]),e.qOj],decls:8,vars:9,consts:function(){let n,_;return n="No Limit",_="No Limit",[["columnMode","flex","selectionType","multiClick","identifier","bid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["bucketSizeTpl",""],["bucketObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],n,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,o){1&_&&(e.TgZ(0,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return o.setExpandedRow(s)})("updateSelection",function(s){return o.updateSelection(s)})("fetchData",function(s){return o.getBucketList(s)}),e._UZ(2,"cd-table-actions",2)(3,"cd-rgw-bucket-details",3),e.qZA(),e.YNc(4,No,3,2,"ng-template",null,4,e.W1O),e.YNc(6,Io,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.Q6J("autoReload",!1)("data",o.buckets)("columns",o.columns)("hasDetails",!0)("status",o.tableStatus),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("selection",o.expandedRow))},dependencies:[T.O5,R_.O,le.a,Je.K,So]}),t})();var Lo=c(58111),E_=c(76317),f_=c(61350),Wo=c(59376),$o=c(60351);function Zo(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table-key-value",11),e.NdJ("fetchData",function(){e.CHM(_);const i=e.oxw(2);return e.KtG(i.getMetaData())}),e.qZA()}if(2&t){const _=e.oxw(2);e.Q6J("data",_.metadata)}}function Do(t,n){if(1&t&&e._UZ(0,"cd-table-performance-counter",12),2&t){const _=e.oxw(2);e.Q6J("serviceId",_.serviceMapId)}}function Uo(t,n){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.Q6J("grafanaPath","rgw-instance-detail?var-rgw_servers=rgw."+_.serviceId)("type","metrics")}}function vo(t,n){1&t&&(e.ynx(0,13),e.TgZ(1,"a",4),e.SDv(2,14),e.qZA(),e.YNc(3,Uo,1,2,"ng-template",6),e.BQk())}function yo(t,n){if(1&t&&(e.ynx(0),e.TgZ(1,"nav",1,2),e.ynx(3,3),e.TgZ(4,"a",4),e.SDv(5,5),e.qZA(),e.YNc(6,Zo,1,1,"ng-template",6),e.BQk(),e.ynx(7,7),e.TgZ(8,"a",4),e.SDv(9,8),e.qZA(),e.YNc(10,Do,1,1,"ng-template",6),e.BQk(),e.YNc(11,vo,4,0,"ng-container",9),e.qZA(),e._UZ(12,"div",10),e.BQk()),2&t){const _=e.MAs(2),o=e.oxw();e.xp6(11),e.Q6J("ngIf",o.grafanaPermission.read),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let wo=(()=>{class 
t{constructor(_,o){this.rgwDaemonService=_,this.authStorageService=o,this.serviceId="",this.serviceMapId="",this.grafanaPermission=this.authStorageService.getPermissions().grafana}ngOnChanges(){this.selection&&(this.serviceId=this.selection.id,this.serviceMapId=this.selection.service_map_id)}getMetaData(){E().isEmpty(this.serviceId)||this.rgwDaemonService.get(this.serviceId).subscribe(_=>{this.metadata=_.rgw_metadata})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(oe.b),e.Y36(ce.j))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-details"]],inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let n,_,o,i;return n="Details",_="Performance Counters",o="Performance Details",i="RGW instance details",[[4,"ngIf"],["ngbNav","","cdStatefulTab","rgw-daemon-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],n,["ngbNavContent",""],["ngbNavItem","performance-counters"],_,["ngbNavItem","performance-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"data","fetchData"],["serviceType","rgw",3,"serviceId"],["ngbNavItem","performance-details"],o,["title",i,"uid","x5ARzZtmk","grafanaStyle","one",3,"grafanaPath","type"]]},template:function(_,o){1&_&&e.YNc(0,yo,13,2,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},dependencies:[T.O5,E_.F,f_.b,Wo.m,$o.p,G.uN,G.Pz,G.nv,G.Vx,G.tO,G.Dy]}),t})();function xo(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",8),e.NdJ("setExpandedRow",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.setExpandedRow(i))})("fetchData",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.getDaemonList(i))}),e._UZ(1,"cd-rgw-daemon-details",9),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.daemons)("columns",_.columns)("hasDetails",!0),e.xp6(1),e.Q6J("selection",_.expandedRow)}}function ko(t,n){1&t&&e._UZ(0,"cd-grafana",11),2&t&&e.Q6J("grafanaPath","rgw-overview?")("type","metrics")}function zo(t,n){1&t&&(e.ynx(0,2),e.TgZ(1,"a",3),e.SDv(2,10),e.qZA(),e.YNc(3,ko,1,2,"ng-template",5),e.BQk())}function qo(t,n){1&t&&e._UZ(0,"cd-grafana",13),2&t&&e.Q6J("grafanaPath","radosgw-sync-overview?")("type","metrics")}function Ho(t,n){1&t&&(e.ynx(0,2),e.TgZ(1,"a",3),e.SDv(2,12),e.qZA(),e.YNc(3,qo,1,2,"ng-template",5),e.BQk())}let Xo=(()=>{class t extends Be.o{constructor(_,o,i,s){super(),this.rgwDaemonService=_,this.authStorageService=o,this.cephShortVersionPipe=i,this.rgwSiteService=s,this.columns=[],this.daemons=[],this.updateDaemons=l=>{this.daemons=l}}ngOnInit(){this.grafanaPermission=this.authStorageService.getPermissions().grafana,this.columns=[{name:"ID",prop:"id",flexGrow:2},{name:"Hostname",prop:"server_hostname",flexGrow:2},{name:"Port",prop:"port",flexGrow:1},{name:"Realm",prop:"realm_name",flexGrow:2},{name:"Zone Group",prop:"zonegroup_name",flexGrow:2},{name:"Zone",prop:"zone_name",flexGrow:2},{name:"Version",prop:"version",flexGrow:1,pipe:this.cephShortVersionPipe}],this.rgwSiteService.get("realms").subscribe(_=>this.isMultiSite=_.length>0)}getDaemonList(_){this.rgwDaemonService.list().subscribe(this.updateDaemons,()=>{_.error()})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(oe.b),e.Y36(ce.j),e.Y36(Lo.F),e.Y36(n_.I))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-daemon-list"]],features:[e.qOj],decls:9,vars:3,consts:function(){let n,_,o,i,s;return n="Gateways List",_="Overall Performance",o="RGW overview",i="Sync Performance",s="Radosgw sync 
overview",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],n,["ngbNavContent",""],["ngbNavItem","",4,"ngIf"],[3,"ngbNavOutlet"],["columnMode","flex",3,"data","columns","hasDetails","setExpandedRow","fetchData"],["cdTableDetail","",3,"selection"],_,["title",o,"uid","WAkugZpiz","grafanaStyle","two",3,"grafanaPath","type"],i,["title",s,"uid","rgw-sync-overview","grafanaStyle","two",3,"grafanaPath","type"]]},template:function(_,o){if(1&_&&(e.TgZ(0,"nav",0,1),e.ynx(2,2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,xo,2,4,"ng-template",5),e.BQk(),e.YNc(6,zo,4,0,"ng-container",6),e.YNc(7,Ho,4,0,"ng-container",6),e.qZA(),e._UZ(8,"div",7)),2&_){const i=e.MAs(1);e.xp6(6),e.Q6J("ngIf",o.grafanaPermission.read),e.xp6(1),e.Q6J("ngIf",o.grafanaPermission.read&&o.isMultiSite),e.xp6(1),e.Q6J("ngbNavOutlet",i)}},dependencies:[T.O5,E_.F,le.a,G.uN,G.Pz,G.nv,G.Vx,G.tO,G.Dy,wo]}),t})();var Bo=c(6481),Ke=c(28211),He=(()=>{return(t=He||(He={})).USERS="users",t.BUCKETS="buckets",t.METADATA="metadata",t.USAGE="usage",t.ZONE="zone",He;var t})();let p_=(()=>{class t{static getAll(){return Object.values(t.capabilities)}}return t.capabilities=He,t})();function Qo(t,n){1&t&&e._UZ(0,"input",22),2&t&&e.Q6J("readonly",!0)}function Yo(t,n){1&t&&(e.TgZ(0,"option",17),e.SDv(1,25),e.qZA()),2&t&&e.Q6J("ngValue",null)}function Jo(t,n){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function Ko(t,n){if(1&t&&(e.TgZ(0,"select",23),e.YNc(1,Yo,2,1,"option",24),e.YNc(2,Jo,2,2,"option",19),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.types),e.xp6(1),e.Q6J("ngForOf",_.types)}}function Vo(t,n){1&t&&(e.TgZ(0,"span",27),e.SDv(1,28),e.qZA())}function jo(t,n){if(1&t&&(e.TgZ(0,"option",26),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function en(t,n){1&t&&(e.TgZ(0,"span",27),e.SDv(1,29),e.qZA())}const _n=function(t){return{required:t}},tn=function(){return["read","write","*"]};let on=(()=>{class t{constructor(_,o,i){this.formBuilder=_,this.activeModal=o,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.types=[],this.resource="capability",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({type:[null,[a.kI.required]],perm:[null,[a.kI.required]]})}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.ADD}setValues(_,o){this.formGroup.setValue({type:_,perm:o})}setCapabilities(_){const o=[];_.forEach(i=>{o.push(i.type)}),this.types=[],p_.getAll().forEach(i=>{-1===E().indexOf(o,i)&&this.types.push(i)})}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ae.O),e.Y36(G.Kz),e.Y36(I.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-capability-modal"]],outputs:{submitAction:"submitAction"},decls:29,vars:24,consts:function(){let n,_,o,i,s,l,r;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Type",o="Permission",i="-- Select a permission --",s="-- Select a type --",l="This field is required.",r="This field is 
required.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","type",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","type","class","form-control","type","text","formControlName","type",3,"readonly",4,"ngIf"],["id","type","class","form-select","formControlName","type","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],o,["id","perm","formControlName","perm",1,"form-select"],[3,"ngValue"],i,[3,"value",4,"ngFor","ngForOf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["id","type","type","text","formControlName","type",1,"form-control",3,"readonly"],["id","type","formControlName","type","autofocus","",1,"form-select"],[3,"ngValue",4,"ngIf"],s,[3,"value"],[1,"invalid-feedback"],l,r]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,Qo,1,1,"input",11),e.YNc(14,Ko,3,2,"select",12),e.YNc(15,Vo,2,0,"span",13),e.qZA()(),e.TgZ(16,"div",7)(17,"label",14),e.SDv(18,15),e.qZA(),e.TgZ(19,"div",10)(20,"select",16)(21,"option",17),e.SDv(22,18),e.qZA(),e.YNc(23,jo,2,2,"option",19),e.qZA(),e.YNc(24,en,2,0,"span",13),e.qZA()()(),e.TgZ(25,"div",20)(26,"cd-form-button-panel",21),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(27,"titlecase"),e.ALo(28,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,13,o.action))(e.lcZ(4,15,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(21,_n,!o.editing)),e.xp6(3),e.Q6J("ngIf",o.editing),e.xp6(1),e.Q6J("ngIf",!o.editing),e.xp6(1),e.Q6J("ngIf",o.formGroup.showError("type",i,"required")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(23,tn)),e.xp6(1),e.Q6J("ngIf",o.formGroup.showError("perm",i,"required")),e.xp6(2),e.Q6J("form",o.formGroup)("submitText",e.lcZ(27,17,o.action)+" "+e.lcZ(28,19,o.resource))}},dependencies:[T.mk,T.sg,T.O5,X.z,B.p,fe.U,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.EJ,a.JJ,a.JL,a.sg,a.u,T.rS,_e.m]}),t})();var pe=c(58039),Xe=c(4416);function nn(t,n){1&t&&e._UZ(0,"input",17),2&t&&e.Q6J("readonly",!0)}function sn(t,n){1&t&&(e.TgZ(0,"option",21),e.SDv(1,22),e.qZA()),2&t&&e.Q6J("ngValue",null)}function an(t,n){if(1&t&&(e.TgZ(0,"option",23),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.Oqu(_)}}function ln(t,n){if(1&t&&(e.TgZ(0,"select",18),e.YNc(1,sn,2,1,"option",19),e.YNc(2,an,2,2,"option",20),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",null!==_.userCandidates),e.xp6(1),e.Q6J("ngForOf",_.userCandidates)}}function rn(t,n){1&t&&(e.TgZ(0,"span",24),e.SDv(1,25),e.qZA())}function cn(t,n){1&t&&(e.TgZ(0,"div",7)(1,"div",26)(2,"div",27),e._UZ(3,"input",28),e.TgZ(4,"label",29),e.SDv(5,30),e.qZA()()()())}function dn(t,n){1&t&&(e.TgZ(0,"span",24),e.SDv(1,37),e.qZA())}const Ve=function(t){return{required:t}};function un(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",31),e.SDv(2,32),e.qZA(),e.TgZ(3,"div",10)(4,"div",33),e._UZ(5,"input",34)(6,"button",35)(7,"cd-copy-2-clipboard-button",36),e.qZA(),e.YNc(8,dn,2,0,"span",13),e.qZA()()),2&t){const 
_=e.oxw(),o=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ve,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(3),e.Q6J("ngIf",_.formGroup.showError("access_key",o,"required"))}}function gn(t,n){1&t&&(e.TgZ(0,"span",24),e.SDv(1,43),e.qZA())}function Rn(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",38),e.SDv(2,39),e.qZA(),e.TgZ(3,"div",10)(4,"div",33),e._UZ(5,"input",40)(6,"button",41)(7,"cd-copy-2-clipboard-button",42),e.qZA(),e.YNc(8,gn,2,0,"span",13),e.qZA()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(1),e.Q6J("ngClass",e.VKq(3,Ve,!_.viewing)),e.xp6(4),e.Q6J("readonly",_.viewing),e.xp6(3),e.Q6J("ngIf",_.formGroup.showError("secret_key",o,"required"))}}let m_=(()=>{class t{constructor(_,o,i){this.formBuilder=_,this.activeModal=o,this.actionLabels=i,this.submitAction=new e.vpe,this.viewing=!0,this.userCandidates=[],this.resource="S3 Key",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({user:[null,[a.kI.required]],generate_key:[!0],access_key:[null,[m.h.requiredIf({generate_key:!1})]],secret_key:[null,[m.h.requiredIf({generate_key:!1})]]})}setViewing(_=!0){this.viewing=_,this.action=this.viewing?this.actionLabels.SHOW:this.actionLabels.CREATE}setValues(_,o,i){this.formGroup.setValue({user:_,generate_key:E().isEmpty(o),access_key:o,secret_key:i})}setUserCandidates(_){this.userCandidates=_}onSubmit(){this.submitAction.emit(this.formGroup.value),this.activeModal.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ae.O),e.Y36(G.Kz),e.Y36(I.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-s3-key-modal"]],outputs:{submitAction:"submitAction"},decls:23,vars:24,consts:function(){let n,_,o,i,s,l,r,d,u;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",o="-- Select a username --",i="This field is required.",s="Auto-generate key",l="Access key",r="This field is required.",d="Secret key",u="This field is required.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","user",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user","class","form-control","type","text","formControlName","user",3,"readonly",4,"ngIf"],["id","user","class","form-control","formControlName","user","autofocus","",4,"ngIf"],["class","invalid-feedback",4,"ngIf"],["class","form-group 
row",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","showSubmit","submitActionEvent"],["id","user","type","text","formControlName","user",1,"form-control",3,"readonly"],["id","user","formControlName","user","autofocus","",1,"form-control"],[3,"ngValue",4,"ngIf"],[3,"value",4,"ngFor","ngForOf"],[3,"ngValue"],o,[3,"value"],[1,"invalid-feedback"],i,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],s,["for","access_key",1,"cd-col-form-label",3,"ngClass"],l,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control",3,"readonly"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],r,["for","secret_key",1,"cd-col-form-label",3,"ngClass"],d,["id","secret_key","type","password","formControlName","secret_key",1,"form-control",3,"readonly"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],u]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e.YNc(13,nn,1,1,"input",11),e.YNc(14,ln,3,2,"select",12),e.YNc(15,rn,2,0,"span",13),e.qZA()(),e.YNc(16,cn,6,0,"div",14),e.YNc(17,un,9,5,"div",14),e.YNc(18,Rn,9,5,"div",14),e.qZA(),e.TgZ(19,"div",15)(20,"cd-form-button-panel",16),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(21,"titlecase"),e.ALo(22,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,14,o.action))(e.lcZ(4,16,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.formGroup),e.xp6(4),e.Q6J("ngClass",e.VKq(22,Ve,!o.viewing)),e.xp6(3),e.Q6J("ngIf",o.viewing),e.xp6(1),e.Q6J("ngIf",!o.viewing),e.xp6(1),e.Q6J("ngIf",o.formGroup.showError("user",i,"required")),e.xp6(1),e.Q6J("ngIf",!o.viewing),e.xp6(1),e.Q6J("ngIf",!o.formGroup.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!o.formGroup.getValue("generate_key")),e.xp6(2),e.Q6J("form",o.formGroup)("submitText",e.lcZ(21,18,o.action)+" "+e.lcZ(22,20,o.resource))("showSubmit",!o.viewing)}},dependencies:[T.mk,T.sg,T.O5,X.z,pe.s,B.p,fe.U,Xe.C,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.Wl,a.EJ,a.JJ,a.JL,a.sg,a.u,T.rS,_e.m]}),t})();class Tn{}function En(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function fn(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function pn(t,n){if(1&t&&(e.TgZ(0,"option",32),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function mn(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,33),e.qZA())}function Mn(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,47),e.qZA())}function Sn(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",41),e.SDv(2,42),e.qZA(),e.TgZ(3,"div",10)(4,"div",43),e._UZ(5,"input",44)(6,"button",45)(7,"cd-copy-2-clipboard-button",46),e.qZA(),e.YNc(8,Mn,2,0,"span",15),e.qZA()()),2&t){const _=e.oxw(2),o=e.MAs(7);e.xp6(8),e.Q6J("ngIf",_.formGroup.showError("secret_key",o,"required"))}}function Cn(t,n){if(1&t&&(e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,34),e.qZA(),e.TgZ(3,"div",7)(4,"div",35)(5,"div",36),e._UZ(6,"input",37),e.TgZ(7,"label",38),e.SDv(8,39),e.qZA()()()(),e.YNc(9,Sn,9,1,"div",40),e.qZA()),2&t){const _=e.oxw();e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.formGroup.getValue("generate_secret"))}}const 
On=function(t){return{required:t}},Fn=function(){return["read","write"]};let Pn=(()=>{class t{constructor(_,o,i){this.formBuilder=_,this.bsModalRef=o,this.actionLabels=i,this.submitAction=new e.vpe,this.editing=!0,this.subusers=[],this.resource="Subuser",this.createForm()}createForm(){this.formGroup=this.formBuilder.group({uid:[null],subuid:[null,[a.kI.required,this.subuserValidator()]],perm:[null,[a.kI.required]],generate_secret:[!0],secret_key:[null,[m.h.requiredIf({generate_secret:!1})]]})}subuserValidator(){const _=this;return o=>_.editing||(0,m.P)(o.value)?null:_.subusers.some(s=>E().isEqual(_.getSubuserName(s.id),o.value))?{subuserIdExists:!0}:null}getSubuserName(_){if(E().isEmpty(_))return _;const o=_.match(/([^:]+)(:(.+))?/);return E().isUndefined(o[3])?o[1]:o[3]}setEditing(_=!0){this.editing=_,this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE}setValues(_,o="",i=""){this.formGroup.setValue({uid:_,subuid:this.getSubuserName(o),perm:i,generate_secret:!0,secret_key:null})}setSubusers(_){this.subusers=_}onSubmit(){const _=this.formGroup.value,o=new Tn;o.id=`${_.uid}:${_.subuid}`,o.permissions=_.perm,o.generate_secret=_.generate_secret,o.secret_key=_.secret_key,this.submitAction.emit(o),this.bsModalRef.close()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ae.O),e.Y36(G.Kz),e.Y36(I.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-subuser-modal"]],outputs:{submitAction:"submitAction"},decls:39,vars:26,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",o="Subuser",i="Permission",s="-- Select a permission --",l="read, write",r="full",d="This field is required.",u="The chosen subuser ID is already in use.",R="This field is required.",O="Swift key",F="Auto-generate secret",b="Secret key",h="This field is required.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","uid",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","uid","type","text","formControlName","uid",1,"form-control",3,"readonly"],["for","subuid",1,"cd-col-form-label",3,"ngClass"],o,["id","subuid","type","text","formControlName","subuid","autofocus","",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],["for","perm",1,"cd-col-form-label","required"],i,["id","perm","formControlName","perm",1,"form-select"],[3,"ngValue"],s,[3,"value",4,"ngFor","ngForOf"],["value","read-write"],l,["value","full-control"],r,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,u,[3,"value"],R,O,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","generate_secret","type","checkbox","formControlName","generate_secret",1,"custom-control-input"],["for","generate_secret",1,"custom-control-label"],F,["class","form-group 
row",4,"ngIf"],["for","secret_key",1,"cd-col-form-label","required"],b,[1,"input-group"],["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],h]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.qZA()(),e.TgZ(14,"div",7)(15,"label",12),e.SDv(16,13),e.qZA(),e.TgZ(17,"div",10),e._UZ(18,"input",14),e.YNc(19,En,2,0,"span",15),e.YNc(20,fn,2,0,"span",15),e.qZA()(),e.TgZ(21,"div",7)(22,"label",16),e.SDv(23,17),e.qZA(),e.TgZ(24,"div",10)(25,"select",18)(26,"option",19),e.SDv(27,20),e.qZA(),e.YNc(28,pn,2,2,"option",21),e.TgZ(29,"option",22),e.SDv(30,23),e.qZA(),e.TgZ(31,"option",24),e.SDv(32,25),e.qZA()(),e.YNc(33,mn,2,0,"span",15),e.qZA()(),e.YNc(34,Cn,10,1,"fieldset",26),e.qZA(),e.TgZ(35,"div",27)(36,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(37,"titlecase"),e.ALo(38,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",o.bsModalRef),e.xp6(4),e.pQV(e.lcZ(3,15,o.action))(e.lcZ(4,17,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.formGroup),e.xp6(7),e.Q6J("readonly",!0),e.xp6(2),e.Q6J("ngClass",e.VKq(23,On,!o.editing)),e.xp6(3),e.Q6J("readonly",o.editing),e.xp6(1),e.Q6J("ngIf",o.formGroup.showError("subuid",i,"required")),e.xp6(1),e.Q6J("ngIf",o.formGroup.showError("subuid",i,"subuserIdExists")),e.xp6(6),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",e.DdM(25,Fn)),e.xp6(5),e.Q6J("ngIf",o.formGroup.showError("perm",i,"required")),e.xp6(1),e.Q6J("ngIf",!o.editing),e.xp6(2),e.Q6J("form",o.formGroup)("submitText",e.lcZ(37,19,o.action)+" "+e.lcZ(38,21,o.resource))}},dependencies:[T.mk,T.sg,T.O5,X.z,pe.s,B.p,fe.U,Xe.C,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.Wl,a.EJ,a.JJ,a.JL,a.sg,a.u,T.rS,_e.m]}),t})();var M_=c(13472);let S_=(()=>{class t{constructor(_,o){this.activeModal=_,this.actionLabels=o,this.resource="Swift Key",this.action=this.actionLabels.SHOW}setValues(_,o){this.user=_,this.secret_key=o}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(I.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-swift-key-modal"]],decls:23,vars:11,consts:function(){let n,_,o;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Username",o="Secret key",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],[1,"modal-body"],["novalidate",""],[1,"form-group","row"],["for","user",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","user","name","user","type","text",1,"form-control",3,"readonly","ngModel","ngModelChange"],["for","secret_key",1,"cd-col-form-label"],o,[1,"input-group"],["id","secret_key","name","secret_key","type","password",1,"form-control",3,"ngModel","readonly","ngModelChange"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],[1,"modal-footer"],[3,"backAction"]]},template:function(_,o){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"div",4)(7,"form",5)(8,"div",6)(9,"label",7),e.SDv(10,8),e.qZA(),e.TgZ(11,"div",9)(12,"input",10),e.NdJ("ngModelChange",function(s){return o.user=s}),e.qZA()()(),e.TgZ(13,"div",6)(14,"label",11),e.SDv(15,12),e.qZA(),e.TgZ(16,"div",9)(17,"div",13)(18,"input",14),e.NdJ("ngModelChange",function(s){return 
o.secret_key=s}),e.qZA(),e._UZ(19,"button",15)(20,"cd-copy-2-clipboard-button",16),e.qZA()()()()(),e.TgZ(21,"div",17)(22,"cd-back-button",18),e.NdJ("backAction",function(){return o.activeModal.close()}),e.qZA()(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,7,o.action))(e.lcZ(4,9,o.resource)),e.QtT(2),e.xp6(8),e.Q6J("readonly",!0)("ngModel",o.user),e.xp6(6),e.Q6J("ngModel",o.secret_key)("readonly",!0))},dependencies:[M_.W,X.z,pe.s,Xe.C,k.o,q.b,z.P,a._Y,a.Fj,a.JJ,a.JL,a.On,a.F,T.rS,_e.m]}),t})();var Nn=c(17932);function Gn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,51),e.qZA())}function An(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,52),e.qZA())}function In(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,53),e.qZA())}function bn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,57),e.qZA())}function hn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,58),e.qZA())}function Ln(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",54),e.SDv(2,55),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",56),e.YNc(5,bn,2,0,"span",13),e.YNc(6,hn,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(4),e.Q6J("readonly",o.editing),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("tenant",_,"pattern")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("tenant",_,"notUnique"))}}function Wn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,59),e.qZA())}function $n(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,60),e.qZA())}function Zn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,61),e.qZA())}function Dn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,62),e.qZA())}function Un(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,65),e.qZA())}function vn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,66),e.qZA())}function yn(t,n){if(1&t&&(e.TgZ(0,"div",8),e._UZ(1,"label",63),e.TgZ(2,"div",11),e._UZ(3,"input",64),e.YNc(4,Un,2,0,"span",13),e.YNc(5,vn,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(4),e.Q6J("ngIf",o.userForm.showError("max_buckets",_,"required")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("max_buckets",_,"min"))}}function wn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,77),e.qZA())}function xn(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",71),e.SDv(2,72),e.qZA(),e.TgZ(3,"div",11)(4,"div",73),e._UZ(5,"input",74)(6,"button",75)(7,"cd-copy-2-clipboard-button",76),e.qZA(),e.YNc(8,wn,2,0,"span",13),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(8),e.Q6J("ngIf",o.userForm.showError("access_key",_,"required"))}}function kn(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,83),e.qZA())}function zn(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",78),e.SDv(2,79),e.qZA(),e.TgZ(3,"div",11)(4,"div",73),e._UZ(5,"input",80)(6,"button",81)(7,"cd-copy-2-clipboard-button",82),e.qZA(),e.YNc(8,kn,2,0,"span",13),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(8),e.Q6J("ngIf",o.userForm.showError("secret_key",_,"required"))}}function qn(t,n){if(1&t&&(e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,67),e.qZA(),e.TgZ(3,"div",8)(4,"div",14)(5,"div",15),e._UZ(6,"input",68),e.TgZ(7,"label",69),e.SDv(8,70),e.qZA()()()(),e.YNc(9,xn,9,1,"div",19),e.YNc(10,zn,9,1,"div",19),e.qZA()),2&t){const _=e.oxw(2);e.xp6(9),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key")),e.xp6(1),e.Q6J("ngIf",!_.editing&&!_.userForm.getValue("generate_key"))}}function Hn(t,n){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,96),e.qZA()())}const K=function(t){return[t]};function Xn(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"span",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"span",97),e._UZ(6,"i"),e.qZA(),e._UZ(7,"input",98),e.TgZ(8,"button",99),e.NdJ("click",function(){const 
s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.showSubuserModal(s))}),e._UZ(9,"i",91),e.qZA(),e.TgZ(10,"button",100),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.deleteSubuser(s))}),e._UZ(11,"i",91),e.qZA()(),e._UZ(12,"span",95),e.qZA()}if(2&t){const _=n.$implicit,o=e.oxw(3);e.xp6(3),e.Tol(o.icons.user),e.xp6(1),e.s9C("value",_.id),e.xp6(2),e.Tol(o.icons.share),e.xp6(1),e.s9C("value","full-control"===_.permissions?"full":_.permissions),e.xp6(2),e.Q6J("ngClass",e.VKq(10,K,o.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(12,K,o.icons.destroy))}}function Bn(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,84),e.qZA(),e.TgZ(3,"div",85)(4,"div",14),e.YNc(5,Hn,3,0,"span",86),e.YNc(6,Xn,13,14,"span",87),e.TgZ(7,"div",88)(8,"div",89)(9,"button",90),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(2);return e.KtG(i.showSubuserModal())}),e._UZ(10,"i",91),e.ynx(11),e.SDv(12,92),e.ALo(13,"titlecase"),e.ALo(14,"upperFirst"),e.BQk(),e.qZA()()(),e._UZ(15,"span",93),e.qZA()()()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.subusers.length),e.xp6(1),e.Q6J("ngForOf",_.subusers),e.xp6(4),e.Q6J("ngClass",e.VKq(9,K,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(13,5,_.actionLabels.CREATE))(e.lcZ(14,7,_.subuserLabel)),e.QtT(12)}}function Qn(t,n){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,106),e.qZA()())}function Yn(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"div",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"button",107),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.showS3KeyModal(s))}),e._UZ(6,"i",91),e.qZA(),e.TgZ(7,"button",108),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.deleteS3Key(s))}),e._UZ(8,"i",91),e.qZA()(),e._UZ(9,"span",95),e.qZA()}if(2&t){const _=n.$implicit,o=e.oxw(3);e.xp6(3),e.Tol(o.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(2),e.Q6J("ngClass",e.VKq(6,K,o.icons.show)),e.xp6(2),e.Q6J("ngClass",e.VKq(8,K,o.icons.destroy))}}function Jn(t,n){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,109),e.qZA()())}function Kn(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"span",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"button",110),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.showSwiftKeyModal(s))}),e._UZ(6,"i",91),e.qZA()(),e._UZ(7,"span",95),e.qZA()}if(2&t){const _=n.$implicit,o=e.oxw(3);e.xp6(3),e.Tol(o.icons.key),e.xp6(1),e.s9C("value",_.user),e.xp6(2),e.Q6J("ngClass",e.VKq(5,K,o.icons.show))}}function Vn(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,101),e.qZA(),e.TgZ(3,"div",8)(4,"label",63),e.SDv(5,102),e.qZA(),e.TgZ(6,"div",11),e.YNc(7,Qn,3,0,"span",86),e.YNc(8,Yn,10,10,"span",87),e.TgZ(9,"div",88)(10,"div",89)(11,"button",103),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(2);return e.KtG(i.showS3KeyModal())}),e._UZ(12,"i",91),e.ynx(13),e.SDv(14,104),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA()()(),e._UZ(17,"span",93),e.qZA(),e._UZ(18,"hr"),e.qZA(),e.TgZ(19,"div",8)(20,"label",63),e.SDv(21,105),e.qZA(),e.TgZ(22,"div",11),e.YNc(23,Jn,3,0,"span",86),e.YNc(24,Kn,8,7,"span",87),e.qZA()()()}if(2&t){const _=e.oxw(2);e.xp6(7),e.Q6J("ngIf",0===_.s3Keys.length),e.xp6(1),e.Q6J("ngForOf",_.s3Keys),e.xp6(4),e.Q6J("ngClass",e.VKq(11,K,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,7,_.actionLabels.CREATE))(e.lcZ(16,9,_.s3keyLabel)),e.QtT(14),e.xp6(7),e.Q6J("ngIf",0===_.swiftKeys.length),e.xp6(1),e.Q6J("ngForOf",_.swiftKeys)}}function 
jn(t,n){1&t&&(e.TgZ(0,"span",94)(1,"span",95),e.SDv(2,114),e.qZA()())}function ei(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"span")(1,"div",73)(2,"div",97),e._UZ(3,"i"),e.qZA(),e._UZ(4,"input",98),e.TgZ(5,"button",115),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.showCapabilityModal(s))}),e._UZ(6,"i",91),e.qZA(),e.TgZ(7,"button",116),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(3);return e.KtG(l.deleteCapability(s))}),e._UZ(8,"i",91),e.qZA()(),e._UZ(9,"span",95),e.qZA()}if(2&t){const _=n.$implicit,o=e.oxw(3);e.xp6(3),e.Tol(o.icons.share),e.xp6(1),e.hYB("value","",_.type,":",_.perm,""),e.xp6(2),e.Q6J("ngClass",e.VKq(7,K,o.icons.edit)),e.xp6(2),e.Q6J("ngClass",e.VKq(9,K,o.icons.destroy))}}function _i(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"fieldset")(1,"legend"),e.SDv(2,111),e.qZA(),e.TgZ(3,"div",8)(4,"div",14),e.YNc(5,jn,3,0,"span",86),e.YNc(6,ei,10,11,"span",87),e.TgZ(7,"div",88)(8,"div",89)(9,"button",112),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(2);return e.KtG(i.showCapabilityModal())}),e.ALo(10,"pipeFunction"),e.ALo(11,"pipeFunction"),e._UZ(12,"i",91),e.ynx(13),e.SDv(14,113),e.ALo(15,"titlecase"),e.ALo(16,"upperFirst"),e.BQk(),e.qZA()()(),e._UZ(17,"span",93),e.qZA()()()}if(2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngIf",0===_.capabilities.length),e.xp6(1),e.Q6J("ngForOf",_.capabilities),e.xp6(3),e.Q6J("disabled",e.xi3(10,7,_.capabilities,_.hasAllCapabilities))("disableTooltip",!e.xi3(11,10,_.capabilities,_.hasAllCapabilities)),e.xp6(3),e.Q6J("ngClass",e.VKq(17,K,_.icons.add)),e.xp6(4),e.pQV(e.lcZ(15,13,_.actionLabels.ADD))(e.lcZ(16,15,_.capabilityLabel)),e.QtT(14)}}function ti(t,n){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",117),e.TgZ(4,"label",118),e.SDv(5,119),e.qZA()()()())}function oi(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,123),e.qZA())}function ni(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,124),e.qZA())}function ii(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,125),e.qZA())}function si(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",120),e.SDv(2,121),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",122),e.YNc(5,oi,2,0,"span",13),e.YNc(6,ni,2,0,"span",13),e.YNc(7,ii,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.userForm.showError("user_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("user_quota_max_size",_,"quotaMaxSize")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("user_quota_max_size",o.formDir,"pattern"))}}function ai(t,n){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",126),e.TgZ(4,"label",127),e.SDv(5,128),e.qZA()()()())}function li(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,132),e.qZA())}function ri(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,133),e.qZA())}function ci(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",129),e.SDv(2,130),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",131),e.YNc(5,li,2,0,"span",13),e.YNc(6,ri,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.userForm.showError("user_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("user_quota_max_objects",_,"min"))}}function di(t,n){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",134),e.TgZ(4,"label",135),e.SDv(5,136),e.qZA()()()())}function ui(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,140),e.qZA())}function gi(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,141),e.qZA())}function Ri(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,142),e.qZA())}function 
Ti(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",137),e.SDv(2,138),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",139),e.YNc(5,ui,2,0,"span",13),e.YNc(6,gi,2,0,"span",13),e.YNc(7,Ri,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.userForm.showError("bucket_quota_max_size",_,"required")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("bucket_quota_max_size",_,"quotaMaxSize")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("bucket_quota_max_size",o.formDir,"pattern"))}}function Ei(t,n){1&t&&(e.TgZ(0,"div",8)(1,"div",14)(2,"div",15),e._UZ(3,"input",143),e.TgZ(4,"label",144),e.SDv(5,145),e.qZA()()()())}function fi(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,149),e.qZA())}function pi(t,n){1&t&&(e.TgZ(0,"span",50),e.SDv(1,150),e.qZA())}function mi(t,n){if(1&t&&(e.TgZ(0,"div",8)(1,"label",146),e.SDv(2,147),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",148),e.YNc(5,fi,2,0,"span",13),e.YNc(6,pi,2,0,"span",13),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.userForm.showError("bucket_quota_max_objects",_,"required")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("bucket_quota_max_objects",_,"min"))}}const C_=function(t){return{required:t}};function Mi(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7)(9,"div",8)(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,Gn,2,0,"span",13),e.YNc(15,An,2,0,"span",13),e.YNc(16,In,2,0,"span",13),e.qZA()(),e.TgZ(17,"div",8)(18,"div",14)(19,"div",15)(20,"input",16),e.NdJ("click",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.updateFieldsWhenTenanted())}),e.qZA(),e.TgZ(21,"label",17),e.SDv(22,18),e.qZA()()()(),e.YNc(23,Ln,7,3,"div",19),e.TgZ(24,"div",8)(25,"label",20),e.SDv(26,21),e.qZA(),e.TgZ(27,"div",11),e._UZ(28,"input",22),e.YNc(29,Wn,2,0,"span",13),e.YNc(30,$n,2,0,"span",13),e.qZA()(),e.TgZ(31,"div",8)(32,"label",23),e.SDv(33,24),e.qZA(),e.TgZ(34,"div",11),e._UZ(35,"input",25),e.YNc(36,Zn,2,0,"span",13),e.YNc(37,Dn,2,0,"span",13),e.qZA()(),e.TgZ(38,"div",8)(39,"label",26),e.SDv(40,27),e.qZA(),e.TgZ(41,"div",11)(42,"select",28),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.onMaxBucketsModeChange(i.target.value))}),e.TgZ(43,"option",29),e.SDv(44,30),e.qZA(),e.TgZ(45,"option",31),e.SDv(46,32),e.qZA(),e.TgZ(47,"option",33),e.SDv(48,34),e.qZA()()()(),e.YNc(49,yn,6,2,"div",19),e.TgZ(50,"div",8)(51,"div",14)(52,"div",15),e._UZ(53,"input",35),e.TgZ(54,"label",36),e.SDv(55,37),e.qZA(),e.TgZ(56,"cd-helper"),e.SDv(57,38),e.qZA()()()(),e.YNc(58,qn,11,2,"fieldset",39),e.YNc(59,Bn,16,11,"fieldset",39),e.YNc(60,Vn,25,13,"fieldset",39),e.YNc(61,_i,18,19,"fieldset",39),e.TgZ(62,"fieldset")(63,"legend"),e.SDv(64,40),e.qZA(),e.TgZ(65,"div",8)(66,"div",14)(67,"div",15),e._UZ(68,"input",41),e.TgZ(69,"label",42),e.SDv(70,43),e.qZA()()()(),e.YNc(71,ti,6,0,"div",19),e.YNc(72,si,8,3,"div",19),e.YNc(73,ai,6,0,"div",19),e.YNc(74,ci,7,2,"div",19),e.qZA(),e.TgZ(75,"fieldset")(76,"legend"),e.SDv(77,44),e.qZA(),e.TgZ(78,"div",8)(79,"div",14)(80,"div",15),e._UZ(81,"input",45),e.TgZ(82,"label",46),e.SDv(83,47),e.qZA()()()(),e.YNc(84,di,6,0,"div",19),e.YNc(85,Ti,8,3,"div",19),e.YNc(86,Ei,6,0,"div",19),e.YNc(87,mi,7,2,"div",19),e.qZA()(),e.TgZ(88,"div",48)(89,"cd-form-button-panel",49),e.NdJ("submitActionEvent",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.onSubmit())}),e.ALo(90,"titlecase"),e.ALo(91,"upperFirst"),e.qZA()()()()()}if(2&t){const 
_=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.userForm),e.xp6(6),e.pQV(e.lcZ(6,30,o.action))(e.lcZ(7,32,o.resource)),e.QtT(5),e.xp6(3),e.Q6J("ngClass",e.VKq(38,C_,!o.editing)),e.xp6(3),e.Q6J("readonly",o.editing),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("user_id",_,"required")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("user_id",_,"pattern")),e.xp6(1),e.Q6J("ngIf",!o.userForm.getValue("show_tenant")&&o.userForm.showError("user_id",_,"notUnique")),e.xp6(4),e.Q6J("readonly",!0),e.xp6(3),e.Q6J("ngIf",o.userForm.getValue("show_tenant")),e.xp6(2),e.Q6J("ngClass",e.VKq(40,C_,!o.editing)),e.xp6(4),e.Q6J("ngIf",o.userForm.showError("display_name",_,"pattern")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("display_name",_,"required")),e.xp6(6),e.Q6J("ngIf",o.userForm.showError("email",_,"email")),e.xp6(1),e.Q6J("ngIf",o.userForm.showError("email",_,"notUnique")),e.xp6(12),e.Q6J("ngIf",1==o.userForm.get("max_buckets_mode").value),e.xp6(9),e.Q6J("ngIf",!o.editing),e.xp6(1),e.Q6J("ngIf",o.editing),e.xp6(1),e.Q6J("ngIf",o.editing),e.xp6(1),e.Q6J("ngIf",o.editing),e.xp6(10),e.Q6J("ngIf",o.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",o.userForm.controls.user_quota_enabled.value&&!o.userForm.getValue("user_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",o.userForm.controls.user_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",o.userForm.controls.user_quota_enabled.value&&!o.userForm.getValue("user_quota_max_objects_unlimited")),e.xp6(10),e.Q6J("ngIf",o.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",o.userForm.controls.bucket_quota_enabled.value&&!o.userForm.getValue("bucket_quota_max_size_unlimited")),e.xp6(1),e.Q6J("ngIf",o.userForm.controls.bucket_quota_enabled.value),e.xp6(1),e.Q6J("ngIf",o.userForm.controls.bucket_quota_enabled.value&&!o.userForm.getValue("bucket_quota_max_objects_unlimited")),e.xp6(2),e.Q6J("form",o.userForm)("submitText",e.lcZ(90,34,o.action)+" "+e.lcZ(91,36,o.resource))}}let O_=(()=>{class t extends l_.E{constructor(_,o,i,s,l,r,d){super(),this.formBuilder=_,this.route=o,this.router=i,this.rgwUserService=s,this.modalService=l,this.notificationService=r,this.actionLabels=d,this.editing=!1,this.submitObservables=[],this.icons=$.P,this.subusers=[],this.s3Keys=[],this.swiftKeys=[],this.capabilities=[],this.showTenant=!1,this.previousTenant=null,this.resource="user",this.subuserLabel="subuser",this.s3keyLabel="S3 Key",this.capabilityLabel="capability",this.editing=this.router.url.startsWith(`/rgw/user/${I.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.createForm()}createForm(){this.userForm=this.formBuilder.group({user_id:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[m.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("tenant"))]],show_tenant:[this.editing],tenant:[null,[a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_-]+$/)],this.editing?[]:[m.h.unique(this.rgwUserService.exists,this.rgwUserService,()=>this.userForm.getValue("user_id"),!0)]],display_name:[null,[a.kI.required,a.kI.pattern(/^[a-zA-Z0-9!@#%^&*()_ 
-]+$/)]],email:[null,[m.h.email],[m.h.unique(this.rgwUserService.emailExists,this.rgwUserService)]],max_buckets_mode:[1],max_buckets:[1e3,[m.h.requiredIf({max_buckets_mode:"1"}),m.h.number(!1)]],suspended:[!1],generate_key:[!0],access_key:[null,[m.h.requiredIf({generate_key:!1})]],secret_key:[null,[m.h.requiredIf({generate_key:!1})]],user_quota_enabled:[!1],user_quota_max_size_unlimited:[!0],user_quota_max_size:[null,[m.h.composeIf({user_quota_enabled:!0,user_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],user_quota_max_objects_unlimited:[!0],user_quota_max_objects:[null,[m.h.requiredIf({user_quota_enabled:!0,user_quota_max_objects_unlimited:!1})]],bucket_quota_enabled:[!1],bucket_quota_max_size_unlimited:[!0],bucket_quota_max_size:[null,[m.h.composeIf({bucket_quota_enabled:!0,bucket_quota_max_size_unlimited:!1},[a.kI.required,this.quotaMaxSizeValidator])]],bucket_quota_max_objects_unlimited:[!0],bucket_quota_max_objects:[null,[m.h.requiredIf({bucket_quota_enabled:!0,bucket_quota_max_objects_unlimited:!1})]]})}ngOnInit(){this.route.params.subscribe(_=>{if(!_.hasOwnProperty("uid"))return void this.loadingReady();const o=decodeURIComponent(_.uid),i=[];i.push(this.rgwUserService.get(o)),i.push(this.rgwUserService.getQuota(o)),(0,ne.D)(i).subscribe(s=>{const l=E().clone(this.userForm.value);let r=E().pick(s[0],E().keys(this.userForm.value));switch(r.max_buckets){case-1:r.max_buckets_mode=-1,r.max_buckets="";break;case 0:r.max_buckets_mode=0,r.max_buckets="";break;default:r.max_buckets_mode=1}["user","bucket"].forEach(u=>{const R=s[1][u+"_quota"];r[u+"_quota_enabled"]=R.enabled,R.max_size<0?(r[u+"_quota_max_size_unlimited"]=!0,r[u+"_quota_max_size"]=null):(r[u+"_quota_max_size_unlimited"]=!1,r[u+"_quota_max_size"]=`${R.max_size} B`),R.max_objects<0?(r[u+"_quota_max_objects_unlimited"]=!0,r[u+"_quota_max_objects"]=null):(r[u+"_quota_max_objects_unlimited"]=!1,r[u+"_quota_max_objects"]=R.max_objects)}),r=E().merge(l,r),this.userForm.setValue(r),this.subusers=s[0].subusers,this.s3Keys=s[0].keys,this.swiftKeys=s[0].swift_keys;const d={"read, write":"*"};s[0].caps.forEach(u=>{u.perm in d&&(u.perm=d[u.perm])}),this.capabilities=s[0].caps,this.loadingReady()},()=>{this.loadingError()})})}goToListView(){this.router.navigate(["/rgw/user"])}onSubmit(){let _;if(this.userForm.pristine)return void this.goToListView();const o=this.getUID();if(this.editing){if(this._isGeneralDirty()){const i=this._getUpdateArgs();this.submitObservables.push(this.rgwUserService.update(o,i))}_="Updated Object Gateway user '" + o + "'"}else{const i=this._getCreateArgs();this.submitObservables.push(this.rgwUserService.create(i)),_="Created Object Gateway user '" + o + "'"}if(this._isUserQuotaDirty()){const i=this._getUserQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(o,i))}if(this._isBucketQuotaDirty()){const i=this._getBucketQuotaArgs();this.submitObservables.push(this.rgwUserService.updateQuota(o,i))}(0,Bo.z)(...this.submitObservables).subscribe({error:()=>{this.userForm.setErrors({cdSubmitButton:!0})},complete:()=>{this.notificationService.show(w.k.success,_),this.goToListView()}})}updateFieldsWhenTenanted(){this.showTenant=this.userForm.getValue("show_tenant"),this.showTenant?(this.userForm.get("user_id").markAsTouched(),this.previousTenant=this.userForm.get("tenant").value,this.userForm.get("tenant").patchValue(null)):(this.userForm.get("user_id").markAsUntouched(),this.userForm.get("tenant").patchValue(this.previousTenant))}getUID(){let 
_=this.userForm.getValue("user_id");const o=this.userForm?.getValue("tenant");return o&&o.length>0&&(_=`${this.userForm.getValue("tenant")}$${_}`),_}quotaMaxSizeValidator(_){return(0,m.P)(_.value)?null:null===RegExp("^(\\d+(\\.\\d+)?)\\s*(B|K(B|iB)?|M(B|iB)?|G(B|iB)?|T(B|iB)?)?$","i").exec(_.value)||(new Ke.H).toBytes(_.value)<1024?{quotaMaxSize:!0}:null}setSubuser(_,o){const i={"full-control":"full","read-write":"readwrite"},s=this.getUID();this.submitObservables.push(this.rgwUserService.createSubuser(s,{subuser:_.id,access:_.permissions in i?i[_.permissions]:_.permissions,key_type:"swift",secret_key:_.secret_key,generate_secret:_.generate_secret?"true":"false"})),E().isNumber(o)?this.subusers[o]=_:(this.subusers.push(_),this.swiftKeys.push({user:_.id,secret_key:_.generate_secret?"Apply your changes first...":_.secret_key})),this.userForm.markAsDirty()}deleteSubuser(_){const o=this.subusers[_];this.submitObservables.push(this.rgwUserService.deleteSubuser(this.getUID(),o.id)),this.s3Keys=this.s3Keys.filter(i=>i.user!==o.id),this.swiftKeys=this.swiftKeys.filter(i=>i.user!==o.id),this.subusers.splice(_,1),this.userForm.markAsDirty()}setCapability(_,o){const i=this.getUID();if(E().isNumber(o)){const s=this.capabilities[o];this.submitObservables.push(this.rgwUserService.deleteCapability(i,s.type,s.perm)),this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities[o]=_}else this.submitObservables.push(this.rgwUserService.addCapability(i,_.type,_.perm)),this.capabilities=[...this.capabilities,_];this.userForm.markAsDirty()}deleteCapability(_){const o=this.capabilities[_];this.submitObservables.push(this.rgwUserService.deleteCapability(this.getUID(),o.type,o.perm)),this.capabilities.splice(_,1),this.capabilities=[...this.capabilities],this.userForm.markAsDirty()}hasAllCapabilities(_){return!E().difference(p_.getAll(),E().map(_,"type")).length}setS3Key(_,o){if(!E().isNumber(o)){const i=_.user.match(/([^:]+)(:(.+))?/),s=i[1],l={subuser:i[2]?i[3]:"",generate_key:_.generate_key?"true":"false"};"false"===l.generate_key&&(E().isNil(_.access_key)||(l.access_key=_.access_key),E().isNil(_.secret_key)||(l.secret_key=_.secret_key)),this.submitObservables.push(this.rgwUserService.addS3Key(s,l)),this.s3Keys.push({user:_.user,access_key:_.generate_key?"Apply your changes first...":_.access_key,secret_key:_.generate_key?"Apply your changes first...":_.secret_key})}this.userForm.markAsDirty()}deleteS3Key(_){const o=this.s3Keys[_];this.submitObservables.push(this.rgwUserService.deleteS3Key(this.getUID(),o.access_key)),this.s3Keys.splice(_,1),this.userForm.markAsDirty()}showSubuserModal(_){const o=this.getUID(),i=this.modalService.show(Pn);if(E().isNumber(_)){const s=this.subusers[_];i.componentInstance.setEditing(),i.componentInstance.setValues(o,s.id,s.permissions)}else i.componentInstance.setEditing(!1),i.componentInstance.setValues(o),i.componentInstance.setSubusers(this.subusers);i.componentInstance.submitAction.subscribe(s=>{this.setSubuser(s,_)})}showS3KeyModal(_){const o=this.modalService.show(m_);if(E().isNumber(_)){const i=this.s3Keys[_];o.componentInstance.setViewing(),o.componentInstance.setValues(i.user,i.access_key,i.secret_key)}else{const i=this._getS3KeyUserCandidates();o.componentInstance.setViewing(!1),o.componentInstance.setUserCandidates(i),o.componentInstance.submitAction.subscribe(s=>{this.setS3Key(s)})}}showSwiftKeyModal(_){const 
o=this.modalService.show(S_),i=this.swiftKeys[_];o.componentInstance.setValues(i.user,i.secret_key)}showCapabilityModal(_){const o=this.modalService.show(on);if(E().isNumber(_)){const i=this.capabilities[_];o.componentInstance.setEditing(),o.componentInstance.setValues(i.type,i.perm)}else o.componentInstance.setEditing(!1),o.componentInstance.setCapabilities(this.capabilities);o.componentInstance.submitAction.subscribe(i=>{this.setCapability(i,_)})}_isGeneralDirty(){return["display_name","email","max_buckets_mode","max_buckets","suspended"].some(_=>this.userForm.get(_).dirty)}_isUserQuotaDirty(){return["user_quota_enabled","user_quota_max_size_unlimited","user_quota_max_size","user_quota_max_objects_unlimited","user_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_isBucketQuotaDirty(){return["bucket_quota_enabled","bucket_quota_max_size_unlimited","bucket_quota_max_size","bucket_quota_max_objects_unlimited","bucket_quota_max_objects"].some(_=>this.userForm.get(_).dirty)}_getCreateArgs(){const _={uid:this.getUID(),display_name:this.userForm.getValue("display_name"),suspended:this.userForm.getValue("suspended"),email:"",max_buckets:this.userForm.getValue("max_buckets"),generate_key:this.userForm.getValue("generate_key"),access_key:"",secret_key:""},o=this.userForm.getValue("email");E().isString(o)&&o.length>0&&E().merge(_,{email:o}),this.userForm.getValue("generate_key")||E().merge(_,{generate_key:!1,access_key:this.userForm.getValue("access_key"),secret_key:this.userForm.getValue("secret_key")});const s=parseInt(this.userForm.getValue("max_buckets_mode"),10);return E().includes([-1,0],s)&&E().merge(_,{max_buckets:s}),_}_getUpdateArgs(){const _={},o=["display_name","email","max_buckets","suspended"];for(const s of o)_[s]=this.userForm.getValue(s);const i=parseInt(this.userForm.getValue("max_buckets_mode"),10);return E().includes([-1,0],i)&&(_.max_buckets=i),_}_getUserQuotaArgs(){const _={quota_type:"user",enabled:this.userForm.getValue("user_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("user_quota_max_size_unlimited")){const o=(new Ke.H).toBytes(this.userForm.getValue("user_quota_max_size"));_.max_size_kb=(o/1024).toFixed(0)}return this.userForm.getValue("user_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("user_quota_max_objects")),_}_getBucketQuotaArgs(){const _={quota_type:"bucket",enabled:this.userForm.getValue("bucket_quota_enabled"),max_size_kb:-1,max_objects:-1};if(!this.userForm.getValue("bucket_quota_max_size_unlimited")){const o=(new Ke.H).toBytes(this.userForm.getValue("bucket_quota_max_size"));_.max_size_kb=(o/1024).toFixed(0)}return this.userForm.getValue("bucket_quota_max_objects_unlimited")||(_.max_objects=this.userForm.getValue("bucket_quota_max_objects")),_}_getS3KeyUserCandidates(){let _=[];const o=this.getUID();return E().isString(o)&&!E().isEmpty(o)&&_.push(o),this.subusers.forEach(i=>{_.push(i.id)}),this.s3Keys.forEach(i=>{_.push(i.user)}),_=E().uniq(_),_}onMaxBucketsModeChange(_){"1"===_&&(this.userForm.get("max_buckets").valid||this.userForm.patchValue({max_buckets:1e3}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ae.O),e.Y36(J.gz),e.Y36(J.F0),e.Y36(Q),e.Y36(ee.Z),e.Y36(Y.g),e.Y36(I.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-form"]],features:[e.qOj],decls:1,vars:1,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y,f,P,N,te,A,Me,Se,Ce,Oe,Fe,Pe,Ne,Ge,Ae,Ie,be,he,Le,We,$e,Ze,De,Ue,ve,ye,we,p,W_,$_,Z_,D_,U_,v_,y_,w_,x_,k_,z_,q_,H_,X_,B_,Q_,Y_,J_,K_,V_;return n="" + 
"\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="User ID",o="Show Tenant",i="Full name",s="Email address",l="Max. buckets",r="Disabled",d="Unlimited",u="Custom",R="Suspended",O="Suspending the user disables the user and subuser.",F="User quota",b="Enabled",h="Bucket quota",M="Enabled",L="This field is required.",S="The value is not valid.",W="The chosen user ID is already in use.",C="Tenant",Z="The value is not valid.",D="The chosen user ID exists in this tenant.",U="The value is not valid.",v="This field is required.",y="This is not a valid email address.",f="The chosen email address is already in use.",P="This field is required.",N="The entered value must be >= 1.",te="S3 key",A="Auto-generate key",Me="Access key",Se="This field is required.",Ce="Secret key",Oe="This field is required.",Fe="Subusers",Pe="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",Ne="There are no subusers.",Ge="Edit",Ae="Delete",Ie="Keys",be="S3",he="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",Le="Swift",We="There are no keys.",$e="Show",Ze="Delete",De="There are no keys.",Ue="Show",ve="Capabilities",ye="All capabilities are already added.",we="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",p="There are no capabilities.",W_="Edit",$_="Delete",Z_="Unlimited size",D_="Max. size",U_="This field is required.",v_="The value is not valid.",y_="Size must be a number or in a valid format. eg: 5 GiB",w_="Unlimited objects",x_="Max. objects",k_="This field is required.",z_="The entered value must be >= 0.",q_="Unlimited size",H_="Max. size",X_="This field is required.",B_="The value is not valid.",Q_="Size must be a number or in a valid format. eg: 5 GiB",Y_="Unlimited objects",J_="Max. objects",K_="This field is required.",V_="The entered value must be >= 0.",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"card"],[1,"card-header"],n,[1,"card-body"],[1,"form-group","row"],["for","user_id",1,"cd-col-form-label",3,"ngClass"],_,[1,"cd-col-form-input"],["id","user_id","type","text","formControlName","user_id",1,"form-control",3,"readonly"],["class","invalid-feedback",4,"ngIf"],[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","show_tenant","type","checkbox","formControlName","show_tenant",1,"custom-control-input",3,"readonly","click"],["for","show_tenant",1,"custom-control-label"],o,["class","form-group 
row",4,"ngIf"],["for","display_name",1,"cd-col-form-label",3,"ngClass"],i,["id","display_name","type","text","formControlName","display_name",1,"form-control"],["for","email",1,"cd-col-form-label"],s,["id","email","type","text","formControlName","email",1,"form-control"],["for","max_buckets_mode",1,"cd-col-form-label"],l,["formControlName","max_buckets_mode","name","max_buckets_mode","id","max_buckets_mode",1,"form-select",3,"change"],["value","-1"],r,["value","0"],d,["value","1"],u,["id","suspended","type","checkbox","formControlName","suspended",1,"custom-control-input"],["for","suspended",1,"custom-control-label"],R,O,[4,"ngIf"],F,["id","user_quota_enabled","type","checkbox","formControlName","user_quota_enabled",1,"custom-control-input"],["for","user_quota_enabled",1,"custom-control-label"],b,h,["id","bucket_quota_enabled","type","checkbox","formControlName","bucket_quota_enabled",1,"custom-control-input"],["for","bucket_quota_enabled",1,"custom-control-label"],M,[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],L,S,W,["for","tenant",1,"cd-col-form-label"],C,["id","tenant","type","text","formControlName","tenant","autofocus","",1,"form-control",3,"readonly"],Z,D,U,v,y,f,[1,"cd-col-form-label"],["id","max_buckets","type","number","formControlName","max_buckets","min","1",1,"form-control"],P,N,te,["id","generate_key","type","checkbox","formControlName","generate_key",1,"custom-control-input"],["for","generate_key",1,"custom-control-label"],A,["for","access_key",1,"cd-col-form-label","required"],Me,[1,"input-group"],["id","access_key","type","password","formControlName","access_key",1,"form-control"],["type","button","cdPasswordButton","access_key",1,"btn","btn-light"],["source","access_key"],Se,["for","secret_key",1,"cd-col-form-label","required"],Ce,["id","secret_key","type","password","formControlName","secret_key",1,"form-control"],["type","button","cdPasswordButton","secret_key",1,"btn","btn-light"],["source","secret_key"],Oe,Fe,[1,"row"],["class","no-border",4,"ngIf"],[4,"ngFor","ngForOf"],[1,"row","my-2"],[1,"col-12"],["type","button",1,"btn","btn-light","float-end","tc_addSubuserButton",3,"click"],[3,"ngClass"],Pe,[1,"help-block"],[1,"no-border"],[1,"form-text","text-muted"],Ne,[1,"input-group-text"],["type","text","readonly","",1,"cd-form-control",3,"value"],["type","button","ngbTooltip",Ge,1,"btn","btn-light","tc_showSubuserButton",3,"click"],["type","button","ngbTooltip",Ae,1,"btn","btn-light","tc_deleteSubuserButton",3,"click"],Ie,be,["type","button",1,"btn","btn-light","float-end","tc_addS3KeyButton",3,"click"],he,Le,We,["type","button","ngbTooltip",$e,1,"btn","btn-light","tc_showS3KeyButton",3,"click"],["type","button","ngbTooltip",Ze,1,"btn","btn-light","tc_deleteS3KeyButton",3,"click"],De,["type","button","ngbTooltip",Ue,1,"btn","btn-light","tc_showSwiftKeyButton",3,"click"],ve,["type","button","ngbTooltip",ye,"triggers","pointerenter:pointerleave",1,"btn","btn-light","float-end","tc_addCapButton",3,"disabled","disableTooltip","click"],we,p,["type","button","ngbTooltip",W_,1,"btn","btn-light","tc_editCapButton",3,"click"],["type","button","ngbTooltip",$_,1,"btn","btn-light","tc_deleteCapButton",3,"click"],["id","user_quota_max_size_unlimited","type","checkbox","formControlName","user_quota_max_size_unlimited",1,"custom-control-input"],["for","user_quota_max_size_unlimited",1,"custom-control-label"],Z_,["for","user_quota_max_size",1,"cd-col-form-label","required"],D_,["id","user_quota_max_size","type","text","fo
rmControlName","user_quota_max_size","cdDimlessBinary","",1,"form-control"],U_,v_,y_,["id","user_quota_max_objects_unlimited","type","checkbox","formControlName","user_quota_max_objects_unlimited",1,"custom-control-input"],["for","user_quota_max_objects_unlimited",1,"custom-control-label"],w_,["for","user_quota_max_objects",1,"cd-col-form-label","required"],x_,["id","user_quota_max_objects","type","number","formControlName","user_quota_max_objects","min","0",1,"form-control"],k_,z_,["id","bucket_quota_max_size_unlimited","type","checkbox","formControlName","bucket_quota_max_size_unlimited",1,"custom-control-input"],["for","bucket_quota_max_size_unlimited",1,"custom-control-label"],q_,["for","bucket_quota_max_size",1,"cd-col-form-label","required"],H_,["id","bucket_quota_max_size","type","text","formControlName","bucket_quota_max_size","cdDimlessBinary","",1,"form-control"],X_,B_,Q_,["id","bucket_quota_max_objects_unlimited","type","checkbox","formControlName","bucket_quota_max_objects_unlimited",1,"custom-control-input"],["for","bucket_quota_max_objects_unlimited",1,"custom-control-label"],Y_,["for","bucket_quota_max_objects",1,"cd-col-form-label","required"],J_,["id","bucket_quota_max_objects","type","number","formControlName","bucket_quota_max_objects","min","0",1,"form-control"],K_,V_]},template:function(_,o){1&_&&e.YNc(0,Mi,92,42,"div",0),2&_&&e.Q6J("cdFormLoading",o.loading)},dependencies:[T.mk,T.sg,T.O5,j.S,pe.s,B.p,fe.U,Nn.Q,Xe.C,r_.y,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.wV,a.Wl,a.EJ,a.JJ,a.JL,a.qQ,a.sg,a.u,G._L,T.rS,_e.m,o_.i]}),t})();var F_=c(99466),Si=c(86969),Ci=c(78877);const Oi=["accessKeyTpl"],Fi=["secretKeyTpl"],Pi=function(t){return[t]};function Ni(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"legend"),e.SDv(2,13),e.qZA(),e.TgZ(3,"div")(4,"cd-table",14),e.NdJ("updateSelection",function(i){e.CHM(_);const s=e.oxw(3);return e.KtG(s.updateKeysSelection(i))}),e.TgZ(5,"div",15)(6,"div",16)(7,"button",17),e.NdJ("click",function(){e.CHM(_);const i=e.oxw(3);return e.KtG(i.showKeyModal())}),e._UZ(8,"i",18),e.ynx(9),e.SDv(10,19),e.BQk(),e.qZA()()()()()()}if(2&t){const _=e.oxw(3);e.xp6(4),e.Q6J("data",_.keys)("columns",_.keysColumns),e.xp6(3),e.Q6J("disabled",!_.keysSelection.hasSingleSelection),e.xp6(1),e.Q6J("ngClass",e.VKq(4,Pi,_.icons.show))}}function Gi(t,n){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,20),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Oqu(_.user.email)}}function Ai(t,n){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.AsE(" ",_.id," (",_.permissions,") ")}}function Ii(t,n){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,21),e.qZA(),e.TgZ(3,"td"),e.YNc(4,Ai,2,2,"div",22),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Q6J("ngForOf",_.user.subusers)}}function bi(t,n){if(1&t&&(e.TgZ(0,"div"),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.AsE(" ",_.type," (",_.perm,") ")}}function hi(t,n){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,23),e.qZA(),e.TgZ(3,"td"),e.YNc(4,bi,2,2,"div",22),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Q6J("ngForOf",_.user.caps)}}function Li(t,n){if(1&t&&(e.TgZ(0,"tr")(1,"td",8),e.SDv(2,24),e.qZA(),e.TgZ(3,"td"),e._uU(4),e.ALo(5,"join"),e.qZA()()),2&t){const _=e.oxw(3);e.xp6(4),e.Oqu(e.lcZ(5,1,_.user.mfa_ids))}}function Wi(t,n){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function $i(t,n){1&t&&(e.TgZ(0,"td"),e.SDv(1,29),e.qZA())}function Zi(t,n){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.user_quota.max_size)," 
")}}function Di(t,n){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function Ui(t,n){1&t&&(e.TgZ(0,"td"),e.SDv(1,30),e.qZA())}function vi(t,n){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",_.user.user_quota.max_objects," ")}}function yi(t,n){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,25),e.qZA(),e.TgZ(3,"table",2)(4,"tbody")(5,"tr")(6,"td",3),e.SDv(7,26),e.qZA(),e.TgZ(8,"td",5),e._uU(9),e.ALo(10,"booleanText"),e.qZA()(),e.TgZ(11,"tr")(12,"td",8),e.SDv(13,27),e.qZA(),e.YNc(14,Wi,2,0,"td",0),e.YNc(15,$i,2,0,"td",0),e.YNc(16,Zi,3,3,"td",0),e.qZA(),e.TgZ(17,"tr")(18,"td",8),e.SDv(19,28),e.qZA(),e.YNc(20,Di,2,0,"td",0),e.YNc(21,Ui,2,0,"td",0),e.YNc(22,vi,2,1,"td",0),e.qZA()()()()),2&t){const _=e.oxw(3);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.user_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.user_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.user_quota.enabled&&_.user.user_quota.max_objects>-1)}}function wi(t,n){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function xi(t,n){1&t&&(e.TgZ(0,"td"),e.SDv(1,35),e.qZA())}function ki(t,n){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.ALo(2,"dimlessBinary"),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",e.lcZ(2,1,_.user.bucket_quota.max_size)," ")}}function zi(t,n){1&t&&(e.TgZ(0,"td"),e._uU(1,"-"),e.qZA())}function qi(t,n){1&t&&(e.TgZ(0,"td"),e.SDv(1,36),e.qZA())}function Hi(t,n){if(1&t&&(e.TgZ(0,"td"),e._uU(1),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.hij(" ",_.user.bucket_quota.max_objects," ")}}function Xi(t,n){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,31),e.qZA(),e.TgZ(3,"table",2)(4,"tbody")(5,"tr")(6,"td",3),e.SDv(7,32),e.qZA(),e.TgZ(8,"td",5),e._uU(9),e.ALo(10,"booleanText"),e.qZA()(),e.TgZ(11,"tr")(12,"td",8),e.SDv(13,33),e.qZA(),e.YNc(14,wi,2,0,"td",0),e.YNc(15,xi,2,0,"td",0),e.YNc(16,ki,3,3,"td",0),e.qZA(),e.TgZ(17,"tr")(18,"td",8),e.SDv(19,34),e.qZA(),e.YNc(20,zi,2,0,"td",0),e.YNc(21,qi,2,0,"td",0),e.YNc(22,Hi,2,1,"td",0),e.qZA()()()()),2&t){const _=e.oxw(3);e.xp6(9),e.Oqu(e.lcZ(10,7,_.user.bucket_quota.enabled)),e.xp6(5),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_size>-1),e.xp6(4),e.Q6J("ngIf",!_.user.bucket_quota.enabled),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects<=-1),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota.enabled&&_.user.bucket_quota.max_objects>-1)}}function 
Bi(t,n){if(1&t&&(e.TgZ(0,"div"),e.YNc(1,Ni,11,6,"div",0),e.TgZ(2,"legend"),e.SDv(3,1),e.qZA(),e.TgZ(4,"table",2)(5,"tbody")(6,"tr")(7,"td",3),e.SDv(8,4),e.qZA(),e.TgZ(9,"td",5),e._uU(10),e.qZA()(),e.TgZ(11,"tr")(12,"td",3),e.SDv(13,6),e.qZA(),e.TgZ(14,"td",5),e._uU(15),e.qZA()(),e.TgZ(16,"tr")(17,"td",3),e.SDv(18,7),e.qZA(),e.TgZ(19,"td",5),e._uU(20),e.qZA()(),e.TgZ(21,"tr")(22,"td",8),e.SDv(23,9),e.qZA(),e.TgZ(24,"td"),e._uU(25),e.qZA()(),e.YNc(26,Gi,5,1,"tr",0),e.TgZ(27,"tr")(28,"td",8),e.SDv(29,10),e.qZA(),e.TgZ(30,"td"),e._uU(31),e.ALo(32,"booleanText"),e.qZA()(),e.TgZ(33,"tr")(34,"td",8),e.SDv(35,11),e.qZA(),e.TgZ(36,"td"),e._uU(37),e.ALo(38,"booleanText"),e.qZA()(),e.TgZ(39,"tr")(40,"td",8),e.SDv(41,12),e.qZA(),e.TgZ(42,"td"),e._uU(43),e.ALo(44,"map"),e.qZA()(),e.YNc(45,Ii,5,1,"tr",0),e.YNc(46,hi,5,1,"tr",0),e.YNc(47,Li,6,3,"tr",0),e.qZA()(),e.YNc(48,yi,23,9,"div",0),e.YNc(49,Xi,23,9,"div",0),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("ngIf",_.keys.length),e.xp6(9),e.Oqu(_.user.tenant),e.xp6(5),e.Oqu(_.user.user_id),e.xp6(5),e.Oqu(_.user.uid),e.xp6(5),e.Oqu(_.user.display_name),e.xp6(1),e.Q6J("ngIf",null==_.user.email?null:_.user.email.length),e.xp6(5),e.Oqu(e.lcZ(32,14,_.user.suspended)),e.xp6(6),e.Oqu(e.lcZ(38,16,"true"===_.user.system)),e.xp6(6),e.Oqu(e.xi3(44,18,_.user.max_buckets,_.maxBucketsMap)),e.xp6(2),e.Q6J("ngIf",_.user.subusers&&_.user.subusers.length),e.xp6(1),e.Q6J("ngIf",_.user.caps&&_.user.caps.length),e.xp6(1),e.Q6J("ngIf",null==_.user.mfa_ids?null:_.user.mfa_ids.length),e.xp6(1),e.Q6J("ngIf",_.user.user_quota),e.xp6(1),e.Q6J("ngIf",_.user.bucket_quota)}}function Qi(t,n){if(1&t&&(e.ynx(0),e.YNc(1,Bi,50,21,"div",0),e.BQk()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",_.user)}}let Yi=(()=>{class t{constructor(_,o){this.rgwUserService=_,this.modalService=o,this.keys=[],this.keysColumns=[],this.keysSelection=new qe.r,this.icons=$.P}ngOnInit(){this.keysColumns=[{name:"Username",prop:"username",flexGrow:1},{name:"Type",prop:"type",flexGrow:1}],this.maxBucketsMap={"-1":"Disabled",0:"Unlimited"}}ngOnChanges(){this.selection&&(this.user=this.selection,this.user.subusers=E().sortBy(this.user.subusers,"id"),this.user.caps=E().sortBy(this.user.caps,"type"),this.rgwUserService.getQuota(this.user.uid).subscribe(_=>{E().extend(this.user,_)}),this.keys=[],this.user.keys&&this.user.keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"S3",username:_.user,ref:_})}),this.user.swift_keys&&this.user.swift_keys.forEach(_=>{this.keys.push({id:this.keys.length+1,type:"Swift",username:_.user,ref:_})}),this.keys=E().sortBy(this.keys,"user"))}updateKeysSelection(_){this.keysSelection=_}showKeyModal(){const _=this.keysSelection.first(),o=this.modalService.show("S3"===_.type?m_:S_);switch(_.type){case"S3":o.componentInstance.setViewing(),o.componentInstance.setValues(_.ref.user,_.ref.access_key,_.ref.secret_key);break;case"Swift":o.componentInstance.setValues(_.ref.user,_.ref.secret_key)}}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(Q),e.Y36(ee.Z))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-details"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Oi,5),e.Gf(Fi,5)),2&_){let i;e.iGM(i=e.CRH())&&(o.accessKeyTpl=i.first),e.iGM(i=e.CRH())&&(o.secretKeyTpl=i.first)}},inputs:{selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y,f,P;return n="Details",_="Tenant",o="User ID",i="Username",s="Full name",l="Suspended",r="System",d="Maximum buckets",u="Keys",R="Show",O="Email 
address",F="Subusers",b="Capabilities",h="MFAs(Id)",M="User quota",L="Enabled",S="Maximum size",W="Maximum objects",C="Unlimited",Z="Unlimited",D="Bucket quota",U="Enabled",v="Maximum size",y="Maximum objects",f="Unlimited",P="Unlimited",[[4,"ngIf"],n,[1,"table","table-striped","table-bordered"],[1,"bold","w-25"],_,[1,"w-75"],o,i,[1,"bold"],s,l,r,d,u,["columnMode","flex","selectionType","multi","forceIdentifier","true",3,"data","columns","updateSelection"],[1,"table-actions"],["dropdown","",1,"btn-group"],["type","button",1,"btn","btn-accent",3,"disabled","click"],[3,"ngClass"],R,O,F,[4,"ngFor","ngForOf"],b,h,M,L,S,W,C,Z,D,U,v,y,f,P]},template:function(_,o){1&_&&e.YNc(0,Qi,2,1,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},dependencies:[T.mk,T.sg,T.O5,le.a,k.o,T_.T,Ye.$,Si.A,Ci.b]}),t})();const P_=function(){return{exact:!0}};let Ji=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-tabs"]],decls:7,vars:4,consts:function(){let n,_;return n="Users",_="Roles",[[1,"nav","nav-tabs"],[1,"nav-item"],["routerLink","/rgw/user","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],n,["routerLink","/rgw/roles","routerLinkActive","active","ariaCurrentWhenActive","page",1,"nav-link",3,"routerLinkActiveOptions"],_]},template:function(_,o){1&_&&(e.TgZ(0,"ul",0)(1,"li",1)(2,"a",2),e.SDv(3,3),e.qZA()(),e.TgZ(4,"li",1)(5,"a",4),e.SDv(6,5),e.qZA()()()),2&_&&(e.xp6(2),e.Q6J("routerLinkActiveOptions",e.DdM(2,P_)),e.xp6(3),e.Q6J("routerLinkActiveOptions",e.DdM(3,P_)))},dependencies:[J.rH,J.Od]}),t})();const Ki=["userSizeTpl"],Vi=["userObjectTpl"];function ji(t,n){if(1&t&&e._UZ(0,"cd-usage-bar",8),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_size)("used",_.stats.size_actual)}}function es(t,n){1&t&&e.SDv(0,9)}function _s(t,n){if(1&t&&(e.YNc(0,ji,1,2,"cd-usage-bar",6),e.YNc(1,es,1,0,"ng-template",null,7,e.W1O)),2&t){const _=n.row,o=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_size>0&&_.user_quota.enabled)("ngIfElse",o)}}function ts(t,n){if(1&t&&e._UZ(0,"cd-usage-bar",12),2&t){const _=e.oxw().row;e.Q6J("total",_.user_quota.max_objects)("used",_.stats.num_objects)("isBinary",!1)}}function os(t,n){1&t&&e.SDv(0,13)}function ns(t,n){if(1&t&&(e.YNc(0,ts,1,3,"cd-usage-bar",10),e.YNc(1,os,1,0,"ng-template",null,11,e.W1O)),2&t){const _=n.row,o=e.MAs(2);e.Q6J("ngIf",_.user_quota.max_objects>0&&_.user_quota.enabled)("ngIfElse",o)}}let ss=(()=>{class t extends Be.o{constructor(_,o,i,s,l,r){super(r),this.authStorageService=_,this.rgwUserService=o,this.modalService=i,this.urlBuilder=s,this.actionLabels=l,this.ngZone=r,this.columns=[],this.users=[],this.selection=new qe.r}ngOnInit(){this.permission=this.authStorageService.getPermissions().rgw,this.columns=[{name:"Username",prop:"uid",flexGrow:1},{name:"Tenant",prop:"tenant",flexGrow:1},{name:"Full name",prop:"display_name",flexGrow:1},{name:"Email address",prop:"email",flexGrow:1},{name:"Suspended",prop:"suspended",flexGrow:1,cellClass:"text-center",cellTransformation:F_.e.checkIcon},{name:"Max. 
buckets",prop:"max_buckets",flexGrow:1,cellTransformation:F_.e.map,customTemplateConfig:{"-1":"Disabled",0:"Unlimited"}},{name:"Capacity Limit %",prop:"size_usage",cellTemplate:this.userSizeTpl,flexGrow:.8},{name:"Object Limit %",prop:"object_usage",cellTemplate:this.userObjectTpl,flexGrow:.8}];const _=()=>this.selection.first()&&`${encodeURIComponent(this.selection.first().uid)}`;this.tableActions=[{permission:"create",icon:$.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE,canBePrimary:l=>!l.hasSelection},{permission:"update",icon:$.P.edit,routerLink:()=>this.urlBuilder.getEdit(_()),name:this.actionLabels.EDIT},{permission:"delete",icon:$.P.destroy,click:()=>this.deleteAction(),disable:()=>!this.selection.hasSelection,name:this.actionLabels.DELETE,canBePrimary:l=>l.hasMultiSelection}],this.setTableRefreshTimeout()}getUserList(_){this.setTableRefreshTimeout(),this.rgwUserService.list().subscribe(o=>{this.users=o},()=>{_.error()})}updateSelection(_){this.selection=_}deleteAction(){this.modalService.show(Qe.M,{itemDescription:this.selection.hasSingleSelection?"user":"users",itemNames:this.selection.selected.map(_=>_.uid),submitActionObservable:()=>new u_.y(_=>{(0,ne.D)(this.selection.selected.map(o=>this.rgwUserService.delete(o.uid))).subscribe({error:o=>{_.error(o),this.table.refreshBtn()},complete:()=>{_.complete(),this.table.refreshBtn()}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ce.j),e.Y36(Q),e.Y36(ee.Z),e.Y36(re.F),e.Y36(I.p4),e.Y36(e.R0b))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-user-list"]],viewQuery:function(_,o){if(1&_&&(e.Gf(le.a,7),e.Gf(Ki,7),e.Gf(Vi,7)),2&_){let i;e.iGM(i=e.CRH())&&(o.table=i.first),e.iGM(i=e.CRH())&&(o.userSizeTpl=i.first),e.iGM(i=e.CRH())&&(o.userObjectTpl=i.first)}},features:[e._Bn([{provide:re.F,useValue:new re.F("rgw/user")}]),e.qOj],decls:9,vars:9,consts:function(){let n,_;return n="No Limit",_="No Limit",[["columnMode","flex","selectionType","multiClick","identifier","uid",3,"autoReload","data","columns","hasDetails","status","setExpandedRow","updateSelection","fetchData"],["table",""],[1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","",3,"selection"],["userSizeTpl",""],["userObjectTpl",""],[3,"total","used",4,"ngIf","ngIfElse"],["noSizeQuota",""],[3,"total","used"],n,[3,"total","used","isBinary",4,"ngIf","ngIfElse"],["noObjectQuota",""],[3,"total","used","isBinary"],_]},template:function(_,o){1&_&&(e._UZ(0,"cd-rgw-user-tabs"),e.TgZ(1,"cd-table",0,1),e.NdJ("setExpandedRow",function(s){return o.setExpandedRow(s)})("updateSelection",function(s){return o.updateSelection(s)})("fetchData",function(s){return o.getUserList(s)}),e._UZ(3,"cd-table-actions",2)(4,"cd-rgw-user-details",3),e.qZA(),e.YNc(5,_s,3,2,"ng-template",null,4,e.W1O),e.YNc(7,ns,3,2,"ng-template",null,5,e.W1O)),2&_&&(e.xp6(1),e.Q6J("autoReload",!1)("data",o.users)("columns",o.columns)("hasDetails",!0)("status",o.tableStatus),e.xp6(2),e.Q6J("permission",o.permission)("selection",o.selection)("tableActions",o.tableActions),e.xp6(1),e.Q6J("selection",o.expandedRow))},dependencies:[T.O5,R_.O,le.a,Je.K,Yi,Ji]}),t})();var as=c(83357),je=c(62946),N_=c(13464),ls=c(46797),de=c(95596),e_=c(80381),V=c(95463),x=c(43186),ue=c(97937),ge=c(98961);function rs(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,33),e.qZA())}function cs(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,34),e.qZA())}function ds(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,35),e.qZA())}function us(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,36),e.qZA())}function 
gs(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,37),e.qZA())}function Rs(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,38),e.qZA())}function Ts(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,39),e.qZA())}function Es(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,40),e.qZA())}function fs(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,41),e.qZA())}function ps(t,n){1&t&&(e.TgZ(0,"span",32),e.SDv(1,42),e.qZA())}let ms=(()=>{class t{constructor(_,o,i,s,l,r,d,u,R){this.activeModal=_,this.actionLabels=o,this.rgwMultisiteService=i,this.rgwZoneService=s,this.notificationService=l,this.rgwZonegroupService=r,this.rgwRealmService=d,this.rgwDaemonService=u,this.modalService=R,this.endpoints=/^((https?:\/\/)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+.\d+.\d+)):\d{2,4}$/,this.ipv4Rgx=/^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/i,this.ipv6Rgx=/^(?:[a-f0-9]{1,4}:){7}[a-f0-9]{1,4}$/i,this.submitAction=new e.vpe,this.multisiteInfo=[],this.createForm()}createForm(){this.multisiteMigrateForm=new V.d({realmName:new a.p4(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>this.realmNames&&-1!==this.zoneNames.indexOf(_))]}),zonegroupName:new a.p4(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>this.zonegroupNames&&-1!==this.zoneNames.indexOf(_))]}),zoneName:new a.p4(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>this.zoneNames&&-1!==this.zoneNames.indexOf(_))]}),zone_endpoints:new a.p4([],{validators:[m.h.custom("endpoint",_=>!(E().isEmpty(_)||(_.includes(",")?(_.split(",").forEach(o=>!this.endpoints.test(o)&&!this.ipv4Rgx.test(o)&&!this.ipv6Rgx.test(o)),1):this.endpoints.test(_)||this.ipv4Rgx.test(_)||this.ipv6Rgx.test(_)))),a.kI.required]}),zonegroup_endpoints:new a.p4([],[m.h.custom("endpoint",_=>!(E().isEmpty(_)||(_.includes(",")?(_.split(",").forEach(o=>!this.endpoints.test(o)&&!this.ipv4Rgx.test(o)&&!this.ipv6Rgx.test(o)),1):this.endpoints.test(_)||this.ipv4Rgx.test(_)||this.ipv6Rgx.test(_)))),a.kI.required]),access_key:new a.p4(null),secret_key:new a.p4(null)})}ngOnInit(){this.realmList=void 0!==this.multisiteInfo[0]&&this.multisiteInfo[0].hasOwnProperty("realms")?this.multisiteInfo[0].realms:[],this.realmNames=this.realmList.map(_=>_.name),this.zonegroupList=void 0!==this.multisiteInfo[1]&&this.multisiteInfo[1].hasOwnProperty("zonegroups")?this.multisiteInfo[1].zonegroups:[],this.zonegroupNames=this.zonegroupList.map(_=>_.name),this.zoneList=void 0!==this.multisiteInfo[2]&&this.multisiteInfo[2].hasOwnProperty("zones")?this.multisiteInfo[2].zones:[],this.zoneNames=this.zoneList.map(_=>_.name)}submit(){const _=this.multisiteMigrateForm.value;this.realm=new x.L6,this.realm.name=_.realmName,this.zonegroup=new x.iG,this.zonegroup.name=_.zonegroupName,this.zonegroup.endpoints=_.zonegroup_endpoints,this.zone=new x.jb,this.zone.name=_.zoneName,this.zone.endpoints=_.zone_endpoints,this.zone.system_key=new x.VY,this.zone.system_key.access_key=_.access_key,this.zone.system_key.secret_key=_.secret_key,this.rgwMultisiteService.migrate(this.realm,this.zonegroup,this.zone).subscribe(()=>{this.notificationService.show(w.k.success,"" + this.actionLabels.MIGRATE + " done successfully"),this.submitAction.emit(),this.activeModal.close()},()=>{this.notificationService.show(w.k.error,"Migration failed")})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(I.p4),e.Y36(e_.o),e.Y36(ue.g),e.Y36(Y.g),e.Y36(ge.K),e.Y36(de.y),e.Y36(oe.b),e.Y36(ee.Z))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-migrate"]],outputs:{submitAction:"submitAction"},decls:68,vars:14,consts:function(){let 
n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W;return n="Migrate Single Site to Multi-Site " + "\ufffd#3\ufffd" + "" + "\ufffd#4\ufffd" + "Migrate from a single-site deployment with a default zone group and zone to a multi-site system" + "\ufffd/#4\ufffd" + "" + "\ufffd/#3\ufffd" + "",_="Realm Name",o="Rename default zone group",i="Zone group Endpoints ",s="Rename default zone",l="Zone Endpoints ",r="S3 access key " + "\ufffd#47\ufffd" + "" + "\ufffd#48\ufffd" + "To see or copy your S3 access key, go to " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Object Gateway > Users" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + " and click on your user name. In " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Keys" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + ", click " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Show" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + ". View the access key by clicking Show and copy the key by clicking " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Copy to Clipboard" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + "." + "\ufffd/#48\ufffd" + "" + "\ufffd/#47\ufffd" + "",r=e.Zx4(r),d="S3 secret key " + "\ufffd#58\ufffd" + "" + "\ufffd#59\ufffd" + "To see or copy your S3 access key, go to " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Object Gateway > Users" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + " and click on your user name. In " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Keys" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + ", click " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Show" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + ". View the secret key by clicking Show and copy the key by clicking " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Copy to Clipboard" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + "." 
+ "\ufffd/#59\ufffd" + "" + "\ufffd/#58\ufffd" + "",d=e.Zx4(d),u="This field is required.",R="The chosen realm name is already in use.",O="This field is required.",F="The chosen zone group name is already in use.",b="This field is required.",h="Please enter a valid IP address.",M="This field is required.",L="The chosen zone name is already in use.",S="This field is required.",W="Please enter a valid IP address.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","multisiteMigrateForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","realmName",1,"cd-col-form-label","required"],_,[1,"cd-col-form-input"],["type","text","placeholder","Realm name...","id","realmName","name","realmName","formControlName","realmName",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","zonegroupName",1,"cd-col-form-label","required"],o,["type","text","placeholder","Zone group name...","id","zonegroupName","name","zonegroupName","formControlName","zonegroupName",1,"form-control"],["for","zonegroup_endpoints",1,"cd-col-form-label","required"],i,["type","text","placeholder","e.g, http://ceph-node-00.com:80","id","zonegroup_endpoints","name","zonegroup_endpoints","formControlName","zonegroup_endpoints",1,"form-control"],["for","zoneName",1,"cd-col-form-label","required"],s,["type","text","placeholder","Zone name...","id","zoneName","name","zoneName","formControlName","zoneName",1,"form-control"],["for","zone_endpoints",1,"cd-col-form-label","required"],l,["type","text","placeholder","e.g, http://ceph-node-00.com:80","id","zone_endpoints","name","zone_endpoints","formControlName","zone_endpoints",1,"form-control"],["for","access_key",1,"cd-col-form-label","required"],r,["type","text","placeholder","e.g.","id","access_key","name","access_key","formControlName","access_key",1,"form-control"],d,["type","text","placeholder","e.g.","id","secret_key","name","secret_key","formControlName","secret_key",1,"form-control"],[1,"modal-footer"],[3,"submitText","form","submitActionEvent"],[1,"invalid-feedback"],u,R,O,F,b,h,M,L,S,W]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.tHW(2,2),e.TgZ(3,"cd-helper"),e._UZ(4,"span"),e.qZA(),e.N_p(),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,rs,2,0,"span",12),e.YNc(15,cs,2,0,"span",12),e.qZA()(),e.TgZ(16,"div",7)(17,"label",13),e.SDv(18,14),e.qZA(),e.TgZ(19,"div",10),e._UZ(20,"input",15),e.YNc(21,ds,2,0,"span",12),e.YNc(22,us,2,0,"span",12),e.qZA()(),e.TgZ(23,"div",7)(24,"label",16),e.SDv(25,17),e.qZA(),e.TgZ(26,"div",10),e._UZ(27,"input",18),e.YNc(28,gs,2,0,"span",12),e.YNc(29,Rs,2,0,"span",12),e.qZA()(),e.TgZ(30,"div",7)(31,"label",19),e.SDv(32,20),e.qZA(),e.TgZ(33,"div",10),e._UZ(34,"input",21),e.YNc(35,Ts,2,0,"span",12),e.YNc(36,Es,2,0,"span",12),e.qZA()(),e.TgZ(37,"div",7)(38,"label",22),e.SDv(39,23),e.qZA(),e.TgZ(40,"div",10),e._UZ(41,"input",24),e.YNc(42,fs,2,0,"span",12),e.YNc(43,ps,2,0,"span",12),e.qZA()(),e.TgZ(44,"div",7)(45,"label",25),e.tHW(46,26),e.TgZ(47,"cd-helper")(48,"span"),e._UZ(49,"b")(50,"b")(51,"b")(52,"b"),e.qZA()(),e.N_p(),e.qZA(),e.TgZ(53,"div",10),e._UZ(54,"input",27),e.qZA()(),e.TgZ(55,"div",7)(56,"label",25),e.tHW(57,28),e.TgZ(58,"cd-helper")(59,"span"),e._UZ(60,"b")(61,"b")(62,"b")(63,"b"),e.qZA()(),e.N_p(),e.qZA(),e.TgZ(64,"div",10),e._UZ(65,"input",29),e.qZA()()(),e.TgZ(66,"div",30)(67,"cd-form-button-panel",31),e.NdJ("submitActionEvent",function(){return 
o.submit()}),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(6),e.Q6J("formGroup",o.multisiteMigrateForm),e.xp6(8),e.Q6J("ngIf",o.multisiteMigrateForm.showError("realmName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteMigrateForm.showError("realmName",i,"uniqueName")),e.xp6(6),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zonegroupName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zonegroupName",i,"uniqueName")),e.xp6(6),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zonegroup_endpoints",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zonegroup_endpoints",i,"endpoint")),e.xp6(6),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zoneName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zoneName",i,"uniqueName")),e.xp6(6),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zone_endpoints",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteMigrateForm.showError("zone_endpoints",i,"endpoint")),e.xp6(24),e.Q6J("submitText",o.actionLabels.MIGRATE)("form",o.multisiteMigrateForm)}},dependencies:[T.O5,j.S,X.z,B.p,k.o,q.b,z.P,H.V,a._Y,a.Fj,a.JJ,a.JL,a.sg,a.u]}),t})();var G_=c(80842),Re=c(34501);function Ms(t,n){if(1&t&&(e.TgZ(0,"strong",21),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.Oqu(_)}}function Ss(t,n){1&t&&(e.TgZ(0,"div",22)(1,"cd-alert-panel",23),e.SDv(2,24),e.qZA()())}function Cs(t,n){if(1&t){const _=e.EpF();e.ynx(0),e.TgZ(1,"label",10),e.tHW(2,11),e._UZ(3,"strong"),e.N_p(),e.qZA(),e.TgZ(4,"label",12),e.SDv(5,13),e.qZA(),e.YNc(6,Ms,2,1,"strong",14),e.TgZ(7,"div",15)(8,"div",16)(9,"input",17),e.NdJ("change",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.showDangerText())}),e.qZA(),e.TgZ(10,"label",18),e.SDv(11,19),e.qZA()(),e.YNc(12,Ss,3,0,"div",20),e.qZA(),e.BQk()}if(2&t){const _=e.oxw();e.xp6(3),e.pQV(null==_.zone?null:_.zone.name),e.QtT(2),e.xp6(3),e.Q6J("ngForOf",_.includedPools),e.xp6(6),e.Q6J("ngIf",_.displayText)}}let Os=(()=>{class t{constructor(_,o,i,s,l){this.activeModal=_,this.actionLabels=o,this.notificationService=i,this.rgwZoneService=s,this.poolService=l,this.displayText=!1,this.includedPools=new Set,this.createForm()}ngOnInit(){this.zoneData$=this.rgwZoneService.get(this.zone),this.poolList$=this.poolService.getList()}ngAfterViewInit(){this.updateIncludedPools()}createForm(){this.zoneForm=new V.d({deletePools:new a.p4(!1)})}submit(){this.rgwZoneService.delete(this.zone.name,this.zoneForm.value.deletePools,this.includedPools,this.zone.parent).subscribe(()=>{this.notificationService.show(w.k.success,"Zone: '" + this.zone.name + "' deleted successfully"),this.activeModal.close()},()=>{this.zoneForm.setErrors({cdSubmitButton:!0})})}showDangerText(){this.displayText=!this.displayText}updateIncludedPools(){!this.zoneData$||!this.poolList$||this.zoneData$.subscribe(_=>{this.poolList$.subscribe(o=>{for(const i of o)for(const s of Object.values(_))if("string"==typeof s&&s.includes(i.pool_name))this.includedPools.add(i.pool_name);else if(Array.isArray(s)&&s[0].val)for(const l of s){const r=l.val;r.storage_classes.STANDARD.data_pool===i.pool_name&&this.includedPools.add(r.storage_classes.STANDARD.data_pool),r.data_extra_pool===i.pool_name&&this.includedPools.add(r.data_extra_pool),r.index_pool===i.pool_name&&this.includedPools.add(r.index_pool)}})})}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(G.Kz),e.Y36(I.p4),e.Y36(Y.g),e.Y36(ue.g),e.Y36(G_.q))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-zone-deletion-form"]],decls:12,vars:6,consts:function(){let n,_,o,i,s,l;return n="Delete Zone",_=" This will delete your " + "\ufffd#8\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#8\ufffd" + " Zone. ",o=" Do you want to delete the associated pools with the " + "\ufffd#3\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#3\ufffd" + " Zone?",i=" This will delete the following pools and any data stored in these pools:",s="Yes, I want to delete the pools.",l=" This will delete all the data in the pools! ",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","zoneForm","novalidate","",3,"formGroup"],[1,"modal-body","ms-4"],_,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"mt-3"],o,[1,"mb-4"],i,["class","block",4,"ngFor","ngForOf"],[1,"form-group"],[1,"custom-control","custom-checkbox","mt-2"],["type","checkbox","name","deletePools","id","deletePools","formControlName","deletePools",1,"custom-control-input",3,"change"],["for","deletePools",1,"custom-control-label"],s,["class","me-4",4,"ngIf"],[1,"block"],[1,"me-4"],["type","danger"],l]},template:function(_,o){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4)(5,"div",5)(6,"label"),e.tHW(7,6),e._UZ(8,"strong"),e.N_p(),e.qZA(),e.YNc(9,Cs,13,3,"ng-container",7),e.qZA(),e.TgZ(10,"div",8)(11,"cd-form-button-panel",9),e.NdJ("submitActionEvent",function(){return o.submit()}),e.qZA()()(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.zoneForm),e.xp6(4),e.pQV(null==o.zone?null:o.zone.name),e.QtT(7),e.xp6(1),e.Q6J("ngIf",o.includedPools.size),e.xp6(2),e.Q6J("form",o.zoneForm)("submitText",o.actionLabels.DELETE))},dependencies:[T.sg,T.O5,X.z,Re.G,B.p,k.o,q.b,z.P,H.V,a._Y,a.Wl,a.JJ,a.JL,a.sg,a.u],styles:[".block[_ngcontent-%COMP%]{display:block}#scroll[_ngcontent-%COMP%]{height:100%;max-height:10rem;overflow:auto}"]}),t})();function Fs(t,n){1&t&&(e.ynx(0),e.TgZ(1,"label"),e.SDv(2,21),e.qZA(),e.BQk())}function Ps(t,n){if(1&t&&(e.TgZ(0,"strong",22),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.Oqu(_)}}function Ns(t,n){if(1&t&&(e.TgZ(0,"strong",22),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.xp6(1),e.Oqu(_)}}function Gs(t,n){if(1&t&&(e.ynx(0),e.TgZ(1,"strong",13),e._uU(2,"Pools:"),e.qZA(),e.TgZ(3,"div",23),e.YNc(4,Ns,2,1,"strong",15),e.qZA(),e.BQk()),2&t){const _=e.oxw(2);e.xp6(4),e.Q6J("ngForOf",_.includedPools)}}function As(t,n){1&t&&(e.ynx(0),e.TgZ(1,"label",24),e.SDv(2,25),e.qZA(),e.BQk())}function Is(t,n){1&t&&(e.TgZ(0,"div",26)(1,"cd-alert-panel",27),e.SDv(2,28),e.qZA()())}function bs(t,n){if(1&t){const _=e.EpF();e.ynx(0),e.TgZ(1,"label",11),e.tHW(2,12),e._UZ(3,"strong"),e.N_p(),e.qZA(),e.YNc(4,Fs,3,0,"ng-container",7),e.TgZ(5,"strong",13),e._uU(6,"Zones:"),e.qZA(),e.TgZ(7,"div",14),e.YNc(8,Ps,2,1,"strong",15),e.qZA(),e.YNc(9,Gs,5,1,"ng-container",7),e.TgZ(10,"div",16)(11,"div",17)(12,"input",18),e.NdJ("change",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.showDangerText())}),e.qZA(),e.YNc(13,As,3,0,"ng-container",19),e.qZA(),e.YNc(14,Is,3,0,"div",20),e.qZA(),e.BQk()}if(2&t){const _=e.oxw(),o=e.MAs(13);e.xp6(3),e.pQV(null==_.zonegroup?null:_.zonegroup.name),e.QtT(2),e.xp6(1),e.Q6J("ngIf",_.includedPools.size>0),e.xp6(4),e.Q6J("ngForOf",_.zonesList),e.xp6(1),e.Q6J("ngIf",_.includedPools.size>0),e.xp6(4),e.Q6J("ngIf",_.includedPools.size>0)("ngIfElse",o),e.xp6(1),e.Q6J("ngIf",_.displayText)}}function 
hs(t,n){1&t&&(e.TgZ(0,"label",24),e.SDv(1,29),e.qZA())}let Ls=(()=>{class t{constructor(_,o,i,s,l,r){this.activeModal=_,this.actionLabels=o,this.notificationService=i,this.rgwZonegroupService=s,this.poolService=l,this.rgwZoneService=r,this.zonesPools=[],this.zonesList=[],this.displayText=!1,this.includedPools=new Set,this.createForm()}ngOnInit(){this.zonegroupData$=this.rgwZonegroupService.get(this.zonegroup),this.poolList$=this.poolService.getList()}ngAfterViewInit(){this.updateIncludedPools()}createForm(){this.zonegroupForm=new V.d({deletePools:new a.p4(!1)})}submit(){this.rgwZonegroupService.delete(this.zonegroup.name,this.zonegroupForm.value.deletePools,this.includedPools).subscribe(()=>{this.notificationService.show(w.k.success,"Zone: '" + this.zonegroup.name + "' deleted successfully"),this.activeModal.close()})}showDangerText(){this.includedPools.size>0&&(this.displayText=!this.displayText)}updateIncludedPools(){!this.zonegroupData$||!this.poolList$||this.zonegroupData$.subscribe(_=>{for(const o of _.zones)this.zonesList.push(o.name),this.rgwZoneService.get(o).subscribe(i=>{this.poolList$.subscribe(s=>{for(const l of Object.values(i))for(const r of s)if("string"==typeof l&&l.includes(r.pool_name))this.includedPools.add(r.pool_name);else if(Array.isArray(l)&&l[0].val)for(const d of l){const u=d.val;u.storage_classes.STANDARD.data_pool===r.pool_name&&this.includedPools.add(u.storage_classes.STANDARD.data_pool),u.data_extra_pool===r.pool_name&&this.includedPools.add(u.data_extra_pool),u.index_pool===r.pool_name&&this.includedPools.add(u.index_pool)}})})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(I.p4),e.Y36(Y.g),e.Y36(ge.K),e.Y36(G_.q),e.Y36(ue.g))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-zonegroup-deletion-form"]],decls:14,vars:6,consts:function(){let n,_,o,i,s,l,r;return n="Delete Zone Group",_=" This will delete your " + "\ufffd#8\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#8\ufffd" + " Zone Group. ",o=" Do you want to delete the associated zones and pools with the " + "\ufffd#3\ufffd" + "" + "\ufffd0\ufffd" + "" + "\ufffd/#3\ufffd" + " Zone Group?",i=" This will delete the following:",s="Yes, I want to delete the zones and their pools.",l=" This will delete all the data in the pools! 
",r="Yes, I want to delete the zones.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","zonegroupForm","novalidate","",3,"formGroup"],[1,"modal-body","ms-4"],_,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],["noPoolsConfirmation",""],[1,"mt-3"],o,[1,"mt-3","mb-2","h5","block"],["id","scroll"],["class","block",4,"ngFor","ngForOf"],[1,"form-group"],[1,"custom-control","custom-checkbox","mt-2"],["type","checkbox","name","deletePools","id","deletePools","formControlName","deletePools",1,"custom-control-input",3,"change"],[4,"ngIf","ngIfElse"],["class","me-4",4,"ngIf"],i,[1,"block"],["id","scroll",1,"mb-2"],["for","deletePools",1,"custom-control-label"],s,[1,"me-4"],["type","danger"],l,r]},template:function(_,o){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4)(5,"div",5)(6,"label"),e.tHW(7,6),e._UZ(8,"strong"),e.N_p(),e.qZA(),e.YNc(9,bs,15,7,"ng-container",7),e.qZA(),e.TgZ(10,"div",8)(11,"cd-form-button-panel",9),e.NdJ("submitActionEvent",function(){return o.submit()}),e.qZA()()(),e.BQk(),e.qZA(),e.YNc(12,hs,2,0,"ng-template",null,10,e.W1O)),2&_&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.zonegroupForm),e.xp6(4),e.pQV(null==o.zonegroup?null:o.zonegroup.name),e.QtT(7),e.xp6(1),e.Q6J("ngIf",o.zonesList.length>0),e.xp6(2),e.Q6J("form",o.zonegroupForm)("submitText",o.actionLabels.DELETE))},dependencies:[T.sg,T.O5,X.z,Re.G,B.p,k.o,q.b,z.P,H.V,a._Y,a.Wl,a.JJ,a.JL,a.sg,a.u],styles:[".block[_ngcontent-%COMP%]{display:block}#scroll[_ngcontent-%COMP%]{height:100%;max-height:10rem;overflow:auto}"]}),t})();const Ws=function(t,n,_){return[t,n,_]};function $s(t,n){if(1&t&&(e.TgZ(0,"span",10),e._UZ(1,"i",11),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngClass",e.kEZ(1,Ws,_.icons.large3x,_.icons.spinner,_.icons.spin))}}function Zs(t,n){if(1&t&&(e.tHW(0,16,1),e.TgZ(1,"div"),e._UZ(2,"b"),e.qZA(),e.N_p()),2&t){const _=n.$implicit;e.xp6(2),e.pQV(_.realm)(_.token),e.QtT(0)}}function Ds(t,n){if(1&t&&(e.TgZ(0,"cd-alert-panel",15),e.tHW(1,16),e.YNc(2,Zs,3,2,"div",14),e.N_p(),e.qZA()),2&t){const _=e.oxw(2);e.xp6(2),e.Q6J("ngForOf",_.realms)}}function Us(t,n){1&t&&e._UZ(0,"hr")}function vs(t,n){if(1&t&&(e.TgZ(0,"div")(1,"div",17)(2,"label",18),e.SDv(3,19),e.qZA(),e.TgZ(4,"div",20),e._UZ(5,"input",21),e.qZA()(),e.TgZ(6,"div",17)(7,"label",22),e.SDv(8,23),e.qZA(),e.TgZ(9,"div",20),e._UZ(10,"input",24)(11,"cd-copy-2-clipboard-button",25),e.qZA(),e.YNc(12,Us,1,0,"hr",26),e.qZA()()),2&t){const _=n.$implicit,o=e.oxw(2);e.xp6(5),e.s9C("value",_.realm),e.xp6(5),e.s9C("value",_.token),e.xp6(1),e.s9C("source",_.token),e.Q6J("byId",!1),e.xp6(1),e.Q6J("ngIf",o.realms.length>1)}}function ys(t,n){if(1&t&&(e.TgZ(0,"div",12),e.YNc(1,Ds,3,1,"cd-alert-panel",13),e.YNc(2,vs,13,5,"div",14),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf",!_.tokenValid),e.xp6(1),e.Q6J("ngForOf",_.realms)}}let ws=(()=>{class t{constructor(_,o,i,s,l){this.activeModal=_,this.rgwRealmService=o,this.actionLabels=i,this.notificationService=s,this.changeDetectorRef=l,this.tokenValid=!1,this.loading=!0,this.icons=$.P,this.createForm()}createForm(){this.exportTokenForm=new V.d({})}onSubmit(){this.activeModal.close()}ngOnInit(){this.rgwRealmService.getRealmTokens().subscribe(_=>{this.loading=!1,this.realms=_;var o=new RegExp("^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$");this.realms.forEach(i=>{this.tokenValid=!!o.test(i.token)})})}ngAfterViewChecked(){this.changeDetectorRef.detectChanges()}}return 
t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(de.y),e.Y36(I.p4),e.Y36(Y.g),e.Y36(e.sBO))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-export"]],decls:10,vars:4,consts:function(){let n,_,o,i;return n="Export Multi-Site Realm Token",_="" + "\ufffd*2:1\ufffd\ufffd#1:1\ufffd" + "" + "\ufffd#2:1\ufffd" + "" + "\ufffd0:1\ufffd" + "" + "\ufffd/#2:1\ufffd" + " - " + "\ufffd1:1\ufffd" + " " + "\ufffd/#1:1\ufffd\ufffd/*2:1\ufffd" + "",o="Realm Name ",i="Token ",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","exportTokenForm",3,"formGroup"],["frm","ngForm"],["class","d-flex justify-content-center",4,"ngIf"],["class","modal-body",4,"ngIf"],[1,"modal-footer"],["aria-label","Close",1,"m-2","float-end",3,"backAction"],[1,"d-flex","justify-content-center"],[3,"ngClass"],[1,"modal-body"],["type","warning","class","mx-3",4,"ngIf"],[4,"ngFor","ngForOf"],["type","warning",1,"mx-3"],_,[1,"form-group","row"],["for","realmName",1,"cd-col-form-label"],o,[1,"cd-col-form-input"],["id","realmName","name","realmName","type","text","readonly","",3,"value"],["for","token",1,"cd-col-form-label"],i,["id","realmToken","name","realmToken","type","text","readonly","",1,"me-2","mb-4",3,"value"],[3,"source","byId"],[4,"ngIf"]]},template:function(_,o){1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5),e.YNc(6,$s,2,5,"span",6),e.YNc(7,ys,3,2,"div",7),e.TgZ(8,"div",8)(9,"cd-back-button",9),e.NdJ("backAction",function(){return o.activeModal.close()}),e.qZA()()(),e.BQk(),e.qZA()),2&_&&(e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.exportTokenForm),e.xp6(2),e.Q6J("ngIf",o.loading),e.xp6(1),e.Q6J("ngIf",!o.loading))},dependencies:[T.mk,T.sg,T.O5,M_.W,X.z,Re.G,pe.s,k.o,z.P,H.V,a._Y,a.JL,a.sg]}),t})();var xs=c(7022),ks=c(22120),zs=c(48168),qs=c(14745),A_=c(79765),Hs=c(66682),Xs=c(54395),Bs=c(87519),Qs=c(45435),Ys=c(88002),I_=c(60192);function Js(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,30),e.qZA())}function Ks(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,31),e.qZA())}function Vs(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,32),e.qZA())}function js(t,n){1&t&&(e.TgZ(0,"div",9)(1,"label",33),e.SDv(2,34),e.qZA(),e.TgZ(3,"div",12)(4,"select",35)(5,"option",36),e.SDv(6,37),e.qZA(),e.TgZ(7,"option",38),e.SDv(8,39),e.qZA()()()())}function ea(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,43),e.qZA())}function _a(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",9)(1,"label",40),e.SDv(2,41),e.qZA(),e.TgZ(3,"div",12)(4,"input",42),e.NdJ("focus",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.labelFocus.next(i.target.value))})("click",function(i){e.CHM(_);const s=e.oxw();return e.KtG(s.labelClick.next(i.target.value))}),e.qZA(),e.YNc(5,ea,2,0,"span",14),e.qZA()()}if(2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(4),e.Q6J("ngbTypeahead",_.searchLabels),e.xp6(1),e.Q6J("ngIf",_.importTokenForm.showError("label",o,"required"))}}function ta(t,n){if(1&t&&(e.TgZ(0,"div",9)(1,"label",44),e.SDv(2,45),e.qZA(),e.TgZ(3,"div",12),e._UZ(4,"cd-select-badges",46),e.qZA()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("data",_.importTokenForm.controls.hosts.value)("options",_.hosts.options)("messages",_.hosts.messages)}}function oa(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,51),e.qZA())}function na(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,52),e.qZA())}function ia(t,n){if(1&t&&(e.TgZ(0,"div",9)(1,"label",47)(2,"span"),e.SDv(3,48),e.qZA(),e.TgZ(4,"cd-helper"),e.SDv(5,49),e.qZA()(),e.TgZ(6,"div",12),e._UZ(7,"input",50),e.YNc(8,oa,2,0,"span",14),e.YNc(9,na,2,0,"span",14),e.qZA()()),2&t){const 
_=e.oxw(),o=e.MAs(5);e.xp6(8),e.Q6J("ngIf",_.importTokenForm.showError("count",o,"min")),e.xp6(1),e.Q6J("ngIf",_.importTokenForm.showError("count",o,"pattern"))}}function sa(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,56),e.qZA())}function aa(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,57),e.qZA())}function la(t,n){1&t&&(e.TgZ(0,"span",29),e.SDv(1,58),e.qZA())}function ra(t,n){if(1&t&&(e.ynx(0),e.TgZ(1,"div",9)(2,"label",53),e.SDv(3,54),e.qZA(),e.TgZ(4,"div",12),e._UZ(5,"input",55),e.YNc(6,sa,2,0,"span",14),e.YNc(7,aa,2,0,"span",14),e.YNc(8,la,2,0,"span",14),e.qZA()(),e.BQk()),2&t){const _=e.oxw(),o=e.MAs(5);e.xp6(6),e.Q6J("ngIf",_.importTokenForm.showError("rgw_frontend_port",o,"pattern")),e.xp6(1),e.Q6J("ngIf",_.importTokenForm.showError("rgw_frontend_port",o,"min")),e.xp6(1),e.Q6J("ngIf",_.importTokenForm.showError("rgw_frontend_port",o,"max"))}}let ca=(()=>{class t{constructor(_,o,i,s,l){this.activeModal=_,this.hostService=o,this.rgwRealmService=i,this.actionLabels=s,this.notificationService=l,this.endpoints=/^((https?:\/\/)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+.\d+.\d+)):\d{2,4}$/,this.ipv4Rgx=/^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/i,this.ipv6Rgx=/^(?:[a-f0-9]{1,4}:){7}[a-f0-9]{1,4}$/i,this.multisiteInfo=[],this.zoneList=[],this.labelClick=new A_.xQ,this.labelFocus=new A_.xQ,this.searchLabels=r=>(0,Hs.T)(r.pipe((0,Xs.b)(200),(0,Bs.x)()),this.labelFocus,this.labelClick.pipe((0,Qs.h)(()=>!this.typeahead.isPopupOpen()))).pipe((0,Ys.U)(d=>this.labels.filter(u=>u.toLowerCase().indexOf(d.toLowerCase())>-1).slice(0,10))),this.hosts={options:[],messages:new xs.a({empty:"There are no hosts.",filter:"Filter hosts"})},this.createForm()}ngOnInit(){this.zoneList=void 0!==this.multisiteInfo[2]&&this.multisiteInfo[2].hasOwnProperty("zones")?this.multisiteInfo[2].zones:[],this.zoneNames=this.zoneList.map(o=>o.name);const _=new zs.E(()=>{});this.hostService.list(_.toParams(),"false").subscribe(o=>{const i=[];E().forEach(o,s=>{if(E().get(s,"sources.orchestrator",!1)){const l=new qs.$(!1,E().get(s,"hostname"),"");i.push(l)}}),this.hosts.options=[...i]}),this.hostService.getLabels().subscribe(o=>{this.labels=o})}createForm(){this.importTokenForm=new V.d({realmToken:new a.NI("",{validators:[a.kI.required]}),zoneName:new a.NI(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>this.zoneNames&&-1!==this.zoneNames.indexOf(_))]}),rgw_frontend_port:new a.NI(null,{validators:[a.kI.required,a.kI.pattern("^[0-9]*$")]}),placement:new a.NI("hosts"),label:new a.NI(null,[m.h.requiredIf({placement:"label",unmanaged:!1})]),hosts:new a.NI([]),count:new a.NI(null,[m.h.number(!1)]),unmanaged:new a.NI(!1)})}onSubmit(){const _=this.importTokenForm.value,o={placement:{}};if(!_.unmanaged){switch(_.placement){case"hosts":_.hosts.length>0&&(o.placement.hosts=_.hosts);break;case"label":o.placement.label=_.label}E().isNumber(_.count)&&_.count>0&&(o.placement.count=_.count)}this.rgwRealmService.importRealmToken(_.realmToken,_.zoneName,_.rgw_frontend_port,o).subscribe(()=>{this.notificationService.show(w.k.success,"Realm token import successfull"),this.activeModal.close()},()=>{this.importTokenForm.setErrors({cdSubmitButton:!0})})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(ks.x),e.Y36(de.y),e.Y36(I.p4),e.Y36(Y.g))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-import"]],viewQuery:function(_,o){if(1&_&&e.Gf(G.dR,5),2&_){let i;e.iGM(i=e.CRH())&&(o.typeahead=i.first)}},decls:47,vars:12,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y;return 
n="Import Multi-Site Token",_="Zone Details",o="Token ",i="Secondary Zone Name",s="Service Details",l="Unmanaged",r="If set to true, the orchestrator will not start nor stop any daemon associated with this service. Placement and all other properties will be ignored.",d="This field is required.",u="This field is required.",R="The chosen zone name is already in use.",O="Placement",F="Hosts",b="Label",h="Label",M="This field is required.",L="Hosts",S="Count",W="Only that number of daemons will be created.",C="The value must be at least 1.",Z="The entered value needs to be a number.",D="Port",U="The entered value needs to be a number.",v="The value must be at least 1.",y="The value cannot exceed 65535.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","importTokenForm",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],["type","info","spacingClass","mb-3"],_,[1,"form-group","row"],["for","realmToken",1,"cd-col-form-label","required"],o,[1,"cd-col-form-input"],["id","realmToken","name","realmToken","type","text","formControlName","realmToken",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","zoneName",1,"cd-col-form-label","required"],i,["type","text","placeholder","Zone name...","id","zoneName","name","zoneName","formControlName","zoneName",1,"form-control"],s,[1,"cd-col-form-offset"],[1,"custom-control","custom-checkbox"],["id","unmanaged","type","checkbox","formControlName","unmanaged",1,"custom-control-input"],["for","unmanaged",1,"custom-control-label"],l,r,["class","form-group row",4,"ngIf"],[4,"ngIf"],[1,"modal-footer"],[3,"submitText","form","submitActionEvent"],[1,"invalid-feedback"],d,u,R,["for","placement",1,"cd-col-form-label"],O,["id","placement","formControlName","placement",1,"form-select"],["value","hosts"],F,["value","label"],b,["for","label",1,"cd-col-form-label"],h,["id","label","type","text","formControlName","label",1,"form-control",3,"ngbTypeahead","focus","click"],M,["for","hosts",1,"cd-col-form-label"],L,["id","hosts",3,"data","options","messages"],["for","count",1,"cd-col-form-label"],S,W,["id","count","type","number","formControlName","count","min","1",1,"form-control"],C,Z,["for","rgw_frontend_port",1,"cd-col-form-label"],D,["id","rgw_frontend_port","type","number","formControlName","rgw_frontend_port","min","1","max","65535",1,"form-control"],U,v,y]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.BQk(),e.ynx(3,3),e.TgZ(4,"form",4,5)(6,"div",6)(7,"cd-alert-panel",7)(8,"ul")(9,"li"),e._uU(10,"This feature allows you to configure a connection between your primary and secondary Ceph clusters for data replication. By importing a token, you establish a link between the clusters, enabling data synchronization."),e.qZA(),e.TgZ(11,"li"),e._uU(12,"To obtain the token, generate it from your primary Ceph cluster. 
This token includes encoded information about the primary cluster's endpoint, access key, and secret key."),e.qZA(),e.TgZ(13,"li"),e._uU(14,"The secondary zone represents the destination cluster where your data will be replicated."),e.qZA()()(),e.TgZ(15,"legend"),e.SDv(16,8),e.qZA(),e.TgZ(17,"div",9)(18,"label",10),e.SDv(19,11),e.qZA(),e.TgZ(20,"div",12),e._UZ(21,"input",13),e.YNc(22,Js,2,0,"span",14),e.qZA()(),e.TgZ(23,"div",9)(24,"label",15),e.SDv(25,16),e.qZA(),e.TgZ(26,"div",12),e._UZ(27,"input",17),e.YNc(28,Ks,2,0,"span",14),e.YNc(29,Vs,2,0,"span",14),e.qZA()(),e.TgZ(30,"legend"),e.SDv(31,18),e.qZA(),e.TgZ(32,"div",9)(33,"div",19)(34,"div",20),e._UZ(35,"input",21),e.TgZ(36,"label",22),e.SDv(37,23),e.qZA(),e.TgZ(38,"cd-helper"),e.SDv(39,24),e.qZA()()()(),e.YNc(40,js,9,0,"div",25),e.YNc(41,_a,6,2,"div",25),e.YNc(42,ta,5,3,"div",25),e.YNc(43,ia,10,2,"div",25),e.YNc(44,ra,9,3,"ng-container",26),e.qZA(),e.TgZ(45,"div",27)(46,"cd-form-button-panel",28),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(5);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.Q6J("formGroup",o.importTokenForm),e.xp6(18),e.Q6J("ngIf",o.importTokenForm.showError("realmToken",i,"required")),e.xp6(6),e.Q6J("ngIf",o.importTokenForm.showError("zoneName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.importTokenForm.showError("zoneName",i,"uniqueName")),e.xp6(11),e.Q6J("ngIf",!o.importTokenForm.controls.unmanaged.value),e.xp6(1),e.Q6J("ngIf",!o.importTokenForm.controls.unmanaged.value&&"label"===o.importTokenForm.controls.placement.value),e.xp6(1),e.Q6J("ngIf",!o.importTokenForm.controls.unmanaged.value&&"hosts"===o.importTokenForm.controls.placement.value),e.xp6(1),e.Q6J("ngIf",!o.importTokenForm.controls.unmanaged.value),e.xp6(1),e.Q6J("ngIf",!o.importTokenForm.controls.unmanaged.value),e.xp6(2),e.Q6J("submitText",o.actionLabels.IMPORT)("form",o.importTokenForm)}},dependencies:[T.O5,j.S,I_.m,X.z,Re.G,B.p,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.wV,a.Wl,a.EJ,a.JJ,a.JL,a.qQ,a.Fd,a.sg,a.u]}),t})();var da=c(72625);function ua(t,n){1&t&&(e.TgZ(0,"span",20),e.SDv(1,21),e.qZA())}function ga(t,n){1&t&&(e.TgZ(0,"span",20),e.SDv(1,22),e.qZA())}function Ra(t,n){1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.SDv(2,23),e.qZA()())}function Ta(t,n){if(1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,24),e._UZ(3,"a",25),e.N_p(),e.qZA()()),2&t){const _=e.oxw();e.xp6(3),e.s9C("href",_.docUrl,e.LSH)}}function Ea(t,n){1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.SDv(2,26),e.qZA()())}let fa=(()=>{class t{constructor(_,o,i,s,l){this.activeModal=_,this.actionLabels=o,this.rgwRealmService=i,this.notificationService=s,this.docService=l,this.editing=!1,this.multisiteInfo=[],this.realmList=[],this.zonegroupList=[],this.defaultRealmDisabled=!1,this.action=this.editing?this.actionLabels.EDIT+this.resource:this.actionLabels.CREATE+this.resource,this.createForm()}createForm(){this.multisiteRealmForm=new V.d({realmName:new a.p4(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>"create"===this.action&&this.realmNames&&-1!==this.realmNames.indexOf(_))]}),default_realm:new a.p4(!1)})}ngOnInit(){this.realmList=void 0!==this.multisiteInfo[0]&&this.multisiteInfo[0].hasOwnProperty("realms")?this.multisiteInfo[0].realms:[],this.realmNames=this.realmList.map(_=>_.name),"edit"===this.action&&(this.zonegroupList=void 
0!==this.multisiteInfo[1]&&this.multisiteInfo[1].hasOwnProperty("zonegroups")?this.multisiteInfo[1].zonegroups:[],this.multisiteRealmForm.get("realmName").setValue(this.info.data.name),this.multisiteRealmForm.get("default_realm").setValue(this.info.data.is_default),this.info.data.is_default&&this.multisiteRealmForm.get("default_realm").disable()),this.zonegroupList.forEach(_=>{!0===_.is_master&&_.realm_id===this.info.data.id&&(this.isMaster=!0)}),this.defaultsInfo&&null!==this.defaultsInfo.defaultRealmName&&(this.multisiteRealmForm.get("default_realm").disable(),this.defaultRealmDisabled=!0),this.docUrl=this.docService.urlGenerator("rgw-multisite")}submit(){const _=this.multisiteRealmForm.getRawValue();this.realm=new x.L6,"create"===this.action?(this.realm.name=_.realmName,this.rgwRealmService.create(this.realm,_.default_realm).subscribe(()=>{this.notificationService.show(w.k.success,"Realm: '" + _.realmName + "' created successfully"),this.activeModal.close()},()=>{this.multisiteRealmForm.setErrors({cdSubmitButton:!0})})):"edit"===this.action&&(this.realm.name=this.info.data.name,this.newRealmName=_.realmName,this.rgwRealmService.update(this.realm,_.default_realm,this.newRealmName).subscribe(()=>{this.notificationService.show(w.k.success,"Realm: '" + _.realmName + "' updated successfully"),this.activeModal.close()},()=>{this.multisiteRealmForm.setErrors({cdSubmitButton:!0})}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(I.p4),e.Y36(de.y),e.Y36(Y.g),e.Y36(da.R))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-realm-form"]],decls:27,vars:20,consts:function(){let n,_,o,i,s,l,r,d;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Realm Name",o="Default",i="This field is required.",s="The chosen realm name is already in use.",l="You cannot unset the default flag.",r="Please consult the " + "\ufffd#3\ufffd" + "documentation" + "\ufffd/#3\ufffd" + " to follow the failover mechanism",d="Default realm already exists.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","multisiteRealmForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","realmName",1,"cd-col-form-label","required"],_,[1,"cd-col-form-input"],["type","text","placeholder","Realm name...","id","realmName","name","realmName","formControlName","realmName",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"custom-control","custom-checkbox"],["id","default_realm","name","default_realm","formControlName","default_realm","type","checkbox",1,"form-check-input"],["for","default_realm",1,"form-check-label"],o,[4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],i,s,l,r,[3,"href"],d]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,ua,2,0,"span",12),e.YNc(15,ga,2,0,"span",12),e.TgZ(16,"div",13),e._UZ(17,"input",14),e.TgZ(18,"label",15),e.SDv(19,16),e.qZA(),e.YNc(20,Ra,3,0,"cd-helper",17),e.YNc(21,Ta,4,1,"cd-helper",17),e.YNc(22,Ea,3,0,"cd-helper",17),e.qZA()()()(),e.TgZ(23,"div",18)(24,"cd-form-button-panel",19),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(25,"titlecase"),e.ALo(26,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const 
i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,12,o.action))(e.lcZ(4,14,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.multisiteRealmForm),e.xp6(8),e.Q6J("ngIf",o.multisiteRealmForm.showError("realmName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteRealmForm.showError("realmName",i,"uniqueName")),e.xp6(2),e.uIk("disabled","edit"===o.action||null),e.xp6(3),e.Q6J("ngIf","edit"===o.action&&o.info.data.is_default),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&!o.info.data.is_default),e.xp6(1),e.Q6J("ngIf",o.defaultRealmDisabled&&"create"===o.action),e.xp6(2),e.Q6J("form",o.multisiteRealmForm)("submitText",e.lcZ(25,16,o.action)+" "+e.lcZ(26,18,o.resource))}},dependencies:[T.O5,j.S,X.z,B.p,k.o,q.b,z.P,H.V,a._Y,a.Fj,a.Wl,a.JJ,a.JL,a.sg,a.u,T.rS,_e.m]}),t})();function pa(t,n){if(1&t&&(e.TgZ(0,"option",36),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw();e.Q6J("value",_.name)("selected",_.name===o.multisiteZoneForm.getValue("selectedZonegroup")),e.xp6(1),e.hij(" ",_.name," ")}}function ma(t,n){1&t&&(e.TgZ(0,"span",37),e.SDv(1,38),e.qZA())}function Ma(t,n){1&t&&(e.TgZ(0,"span",37),e.SDv(1,39),e.qZA())}function Sa(t,n){1&t&&(e.TgZ(0,"span")(1,"cd-helper"),e.SDv(2,40),e.qZA()())}function Ca(t,n){1&t&&(e.TgZ(0,"span")(1,"cd-helper"),e.SDv(2,41),e.qZA()())}function Oa(t,n){if(1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,42),e._UZ(3,"a",43),e.N_p(),e.qZA()()),2&t){const _=e.oxw();e.xp6(3),e.s9C("href",_.docUrl,e.LSH)}}function Fa(t,n){1&t&&(e.TgZ(0,"span")(1,"cd-helper"),e.SDv(2,44),e.qZA()())}function Pa(t,n){1&t&&(e.TgZ(0,"span")(1,"cd-helper"),e.SDv(2,45),e.qZA()())}function Na(t,n){if(1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,46),e._UZ(3,"a",43),e.N_p(),e.qZA()()),2&t){const _=e.oxw();e.xp6(3),e.s9C("href",_.docUrl,e.LSH)}}function Ga(t,n){1&t&&(e.TgZ(0,"span",37),e.SDv(1,47),e.qZA())}function Aa(t,n){1&t&&(e.TgZ(0,"span",37),e.SDv(1,48),e.qZA())}function Ia(t,n){if(1&t&&(e.TgZ(0,"option",36),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(3);e.Q6J("value",_.name)("selected",_.name===o.multisiteZoneForm.getValue("placementTarget")),e.xp6(1),e.hij(" ",_.name," ")}}function ba(t,n){if(1&t&&(e.TgZ(0,"option",36),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(3);e.Q6J("value",_.poolname)("selected",_.poolname===o.multisiteZoneForm.getValue("placementDataPool")),e.xp6(1),e.hij(" ",_.poolname," ")}}function ha(t,n){if(1&t&&(e.TgZ(0,"option",36),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(3);e.Q6J("value",_.poolname)("selected",_.poolname===o.multisiteZoneForm.getValue("placementIndexPool")),e.xp6(1),e.hij(" ",_.poolname," ")}}function La(t,n){if(1&t&&(e.TgZ(0,"option",36),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(3);e.Q6J("value",_.poolname)("selected",_.poolname===o.multisiteZoneForm.getValue("placementDataExtraPool")),e.xp6(1),e.hij(" ",_.poolname," ")}}function Wa(t,n){if(1&t&&(e.TgZ(0,"option",71),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_.value),e.xp6(1),e.hij(" ",_.value," ")}}function $a(t,n){if(1&t&&(e.TgZ(0,"option",36),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(3);e.Q6J("value",_.poolname)("selected",_.poolname===o.multisiteZoneForm.getValue("storageDataPool")),e.xp6(1),e.hij(" ",_.poolname," ")}}function Za(t,n){if(1&t&&(e.TgZ(0,"option",71),e._uU(1),e.qZA()),2&t){const _=n.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Da(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"legend"),e._uU(2,"Placement 
Targets"),e.qZA(),e.TgZ(3,"div",7)(4,"label",49),e.SDv(5,50),e.qZA(),e.TgZ(6,"div",10)(7,"select",51),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw(2);return e.KtG(s.getZonePlacementData(i.target.value))}),e.YNc(8,Ia,2,3,"option",12),e.qZA()()(),e.TgZ(9,"div",7)(10,"label",52),e.SDv(11,53),e.qZA(),e.TgZ(12,"div",10)(13,"select",54),e.YNc(14,ba,2,3,"option",12),e.qZA()()(),e.TgZ(15,"div",7)(16,"label",55),e.SDv(17,56),e.qZA(),e.TgZ(18,"div",10)(19,"select",57),e.YNc(20,ha,2,3,"option",12),e.qZA()()(),e.TgZ(21,"div",7)(22,"label",58),e.SDv(23,59),e.qZA(),e.TgZ(24,"div",10)(25,"select",60),e.YNc(26,La,2,3,"option",12),e.qZA()()(),e.TgZ(27,"div")(28,"legend"),e._uU(29,"Storage Classes"),e.qZA(),e.TgZ(30,"div",7)(31,"label",61),e.SDv(32,62),e.qZA(),e.TgZ(33,"div",10)(34,"select",63),e.NdJ("change",function(i){e.CHM(_);const s=e.oxw(2);return e.KtG(s.getStorageClassData(i.target.value))}),e.YNc(35,Wa,2,2,"option",64),e.qZA()()(),e.TgZ(36,"div",7)(37,"label",65),e.SDv(38,66),e.qZA(),e.TgZ(39,"div",10)(40,"select",67),e.YNc(41,$a,2,3,"option",12),e.qZA()()(),e.TgZ(42,"div",7)(43,"label",68),e.SDv(44,69),e.qZA(),e.TgZ(45,"div",10)(46,"select",70),e.YNc(47,Za,2,2,"option",64),e.qZA()()()()()}if(2&t){const _=e.oxw(2);e.xp6(8),e.Q6J("ngForOf",_.placementTargets),e.xp6(5),e.Q6J("value",_.placementDataPool),e.xp6(1),e.Q6J("ngForOf",_.poolList),e.xp6(6),e.Q6J("ngForOf",_.poolList),e.xp6(6),e.Q6J("ngForOf",_.poolList),e.xp6(9),e.Q6J("ngForOf",_.storageClassList),e.xp6(6),e.Q6J("ngForOf",_.poolList),e.xp6(6),e.Q6J("ngForOf",_.compressionTypes)}}function Ua(t,n){if(1&t&&(e.TgZ(0,"div",7),e.YNc(1,Da,48,8,"div",21),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngIf","edit"===_.action)}}let va=(()=>{class t{constructor(_,o,i,s,l,r,d,u){this.activeModal=_,this.actionLabels=o,this.rgwMultisiteService=i,this.rgwZoneService=s,this.rgwZoneGroupService=l,this.notificationService=r,this.rgwUserService=d,this.modalService=u,this.endpoints=/^((https?:\/\/)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+.\d+.\d+)):\d{2,4}$/,this.ipv4Rgx=/^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/i,this.ipv6Rgx=/^(?:[a-f0-9]{1,4}:){7}[a-f0-9]{1,4}$/i,this.editing=!1,this.defaultsInfo=[],this.multisiteInfo=[],this.zonegroupList=[],this.zoneList=[],this.poolList=[],this.storageClassList=[],this.disableDefault=!1,this.disableMaster=!1,this.isMetadataSync=!1,this.syncStatusTimedOut=!1,this.createSystemUser=!1,this.compressionTypes=["lz4","zlib","snappy"],this.userListReady=!1,this.action=this.editing?this.actionLabels.EDIT+this.resource:this.actionLabels.CREATE+this.resource,this.createForm()}createForm(){this.multisiteZoneForm=new V.d({zoneName:new a.p4(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>"create"===this.action&&this.zoneNames&&-1!==this.zoneNames.indexOf(_))]}),default_zone:new a.p4(!1),master_zone:new a.p4(!1),selectedZonegroup:new a.p4(null),zone_endpoints:new a.p4(null,{validators:[m.h.custom("endpoint",_=>!(E().isEmpty(_)||(_.includes(",")?(_.split(",").forEach(o=>!this.endpoints.test(o)&&!this.ipv4Rgx.test(o)&&!this.ipv6Rgx.test(o)),1):this.endpoints.test(_)||this.ipv4Rgx.test(_)||this.ipv6Rgx.test(_)))),a.kI.required]}),access_key:new a.p4(null,a.kI.required),secret_key:new a.p4(null,a.kI.required),placementTarget:new a.p4(null),placementDataPool:new a.p4(""),placementIndexPool:new a.p4(null),placementDataExtraPool:new a.p4(null),storageClass:new a.p4(null),storageDataPool:new a.p4(null),storageCompression:new a.p4(null)})}onZoneGroupChange(_){let o=new 
x.iG;o.name=_,this.rgwZoneGroupService.get(o).subscribe(i=>{E().isEmpty(i.master_zone)?(this.multisiteZoneForm.get("master_zone").setValue(!0),this.multisiteZoneForm.get("master_zone").disable(),this.disableMaster=!1):!E().isEmpty(i.master_zone)&&"create"===this.action&&(this.multisiteZoneForm.get("master_zone").setValue(!1),this.multisiteZoneForm.get("master_zone").disable(),this.disableMaster=!0)}),this.multisiteZoneForm.getValue("selectedZonegroup")!==this.defaultsInfo.defaultZonegroupName&&(this.disableDefault=!0,this.multisiteZoneForm.get("default_zone").disable())}ngOnInit(){this.zonegroupList=void 0!==this.multisiteInfo[1]&&this.multisiteInfo[1].hasOwnProperty("zonegroups")?this.multisiteInfo[1].zonegroups:[],this.zoneList=void 0!==this.multisiteInfo[2]&&this.multisiteInfo[2].hasOwnProperty("zones")?this.multisiteInfo[2].zones:[],this.zoneNames=this.zoneList.map(_=>_.name),"create"===this.action&&void 0!==this.defaultsInfo.defaultZonegroupName&&(this.multisiteZoneForm.get("selectedZonegroup").setValue(this.defaultsInfo.defaultZonegroupName),this.onZoneGroupChange(this.defaultsInfo.defaultZonegroupName)),"edit"===this.action&&(this.placementTargets=this.info.parent?this.info.parent.data.placement_targets:[],this.rgwZoneService.getPoolNames().subscribe(o=>{this.poolList=o}),this.multisiteZoneForm.get("zoneName").setValue(this.info.data.name),this.multisiteZoneForm.get("selectedZonegroup").setValue(this.info.data.parent),this.multisiteZoneForm.get("default_zone").setValue(this.info.data.is_default),this.multisiteZoneForm.get("master_zone").setValue(this.info.data.is_master),this.multisiteZoneForm.get("zone_endpoints").setValue(this.info.data.endpoints.toString()),this.multisiteZoneForm.get("access_key").setValue(this.info.data.access_key),this.multisiteZoneForm.get("secret_key").setValue(this.info.data.secret_key),this.multisiteZoneForm.get("placementTarget").setValue(this.info.parent.data.default_placement),this.getZonePlacementData(this.multisiteZoneForm.getValue("placementTarget")),this.info.data.is_default&&(this.isDefaultZone=!0,this.multisiteZoneForm.get("default_zone").disable()),this.info.data.is_master&&(this.isMasterZone=!0,this.multisiteZoneForm.get("master_zone").disable()),(new x.jb).name=this.info.data.name,this.onZoneGroupChange(this.info.data.parent)),this.multisiteZoneForm.getValue("selectedZonegroup")!==this.defaultsInfo.defaultZonegroupName&&(this.disableDefault=!0,this.multisiteZoneForm.get("default_zone").disable())}getZonePlacementData(_){this.zone=new x.jb,this.zone.name=this.info.data.name,this.placementTargets&&this.placementTargets.forEach(o=>{o.name===_&&(this.storageClassList=Object.entries(o.storage_classes).map(([s,l])=>({key:s,value:l})))}),this.rgwZoneService.get(this.zone).subscribe(o=>{this.zoneInfo=o,this.zoneInfo&&this.zoneInfo.placement_pools&&this.zoneInfo.placement_pools.forEach(i=>{if(i.key===_){let s=i.val.storage_classes,l=s.STANDARD?s.STANDARD.data_pool:"",r=i.val.index_pool,d=i.val.data_extra_pool;this.poolList.push({poolname:l}),this.poolList.push({poolname:r}),this.poolList.push({poolname:d}),this.multisiteZoneForm.get("storageClass").setValue(this.storageClassList[0].value),this.multisiteZoneForm.get("storageDataPool").setValue(l),this.multisiteZoneForm.get("storageCompression").setValue(this.compressionTypes[0]),this.multisiteZoneForm.get("placementDataPool").setValue(l),this.multisiteZoneForm.get("placementIndexPool").setValue(r),this.multisiteZoneForm.get("placementDataExtraPool").setValue(d)}})})}getStorageClassData(_){let 
o=this.storageClassList.find(i=>i.value==_).value;this.poolList.push({poolname:o.data_pool}),this.multisiteZoneForm.get("storageDataPool").setValue(o.data_pool),this.multisiteZoneForm.get("storageCompression").setValue(o.compression_type)}submit(){const _=this.multisiteZoneForm.getRawValue();"create"===this.action?(this.zonegroup=new x.iG,this.zonegroup.name=_.selectedZonegroup,this.zone=new x.jb,this.zone.name=_.zoneName,this.zone.endpoints=_.zone_endpoints,this.zone.system_key=new x.VY,this.zone.system_key.access_key=_.access_key,this.zone.system_key.secret_key=_.secret_key,this.rgwZoneService.create(this.zone,this.zonegroup,_.default_zone,_.master_zone,this.zone.endpoints).subscribe(()=>{this.notificationService.show(w.k.success,"Zone: '" + _.zoneName + "' created successfully"),this.activeModal.close()},()=>{this.multisiteZoneForm.setErrors({cdSubmitButton:!0})})):"edit"===this.action&&(this.zonegroup=new x.iG,this.zonegroup.name=_.selectedZonegroup,this.zone=new x.jb,this.zone.name=this.info.data.name,this.zone.endpoints=_.zone_endpoints,this.zone.system_key=new x.VY,this.zone.system_key.access_key=_.access_key,this.zone.system_key.secret_key=_.secret_key,this.rgwZoneService.update(this.zone,this.zonegroup,_.zoneName,_.default_zone,_.master_zone,this.zone.endpoints,_.placementTarget,_.placementDataPool,_.placementIndexPool,_.placementDataExtraPool,_.storageClass,_.storageDataPool,_.storageCompression).subscribe(()=>{this.notificationService.show(w.k.success,"Zone: '" + _.zoneName + "' updated successfully"),this.activeModal.close()},()=>{this.multisiteZoneForm.setErrors({cdSubmitButton:!0})}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(I.p4),e.Y36(e_.o),e.Y36(ue.g),e.Y36(ge.K),e.Y36(Y.g),e.Y36(Q),e.Y36(ee.Z))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-zone-form"]],decls:71,vars:29,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y,f;return n="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Select Zone Group",o="Zone Name",i="Default",s="Master",l="Endpoints",r="S3 access key " + "\ufffd#47\ufffd" + "" + "\ufffd#48\ufffd" + "To see or copy your S3 access key, go to " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Object Gateway > Users" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + " and click on your user name. In " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Keys" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + ", click " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Show" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + ". View the access key by clicking Show and copy the key by clicking " + "[\ufffd#49\ufffd|\ufffd#50\ufffd|\ufffd#51\ufffd|\ufffd#52\ufffd]" + "Copy to Clipboard" + "[\ufffd/#49\ufffd|\ufffd/#50\ufffd|\ufffd/#51\ufffd|\ufffd/#52\ufffd]" + "." + "\ufffd/#48\ufffd" + "" + "\ufffd/#47\ufffd" + "",r=e.Zx4(r),d="S3 secret key " + "\ufffd#58\ufffd" + "" + "\ufffd#59\ufffd" + "To see or copy your S3 access key, go to " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Object Gateway > Users" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + " and click on your user name. 
In " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Keys" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + ", click " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Show" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + ". View the secret key by clicking Show and copy the key by clicking " + "[\ufffd#60\ufffd|\ufffd#61\ufffd|\ufffd#62\ufffd|\ufffd#63\ufffd]" + "Copy to Clipboard" + "[\ufffd/#60\ufffd|\ufffd/#61\ufffd|\ufffd/#62\ufffd|\ufffd/#63\ufffd]" + "." + "\ufffd/#59\ufffd" + "" + "\ufffd/#58\ufffd" + "",d=e.Zx4(d),u="This field is required.",R="The chosen zone name is already in use.",O="Default zone can only exist in a default zone group. ",F="You cannot unset the default flag. ",b="Please consult the " + "\ufffd#3\ufffd" + "documentation" + "\ufffd/#3\ufffd" + " to follow the failover mechanism",h="Master zone already exists for the selected zone group. ",M="You cannot unset the master flag. ",L="Please consult the " + "\ufffd#3\ufffd" + "documentation" + "\ufffd/#3\ufffd" + " to follow the failover mechanism",S="This field is required.",W="Please enter a valid IP address.",C="Placement target",Z="Data pool",D="Index pool",U="Data extra pool",v="Storage Class",y="Data pool",f="Compression",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","multisiteZoneForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","selectedZonegroup",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","selectedZonegroup","formControlName","selectedZonegroup","name","selectedZonegroup",1,"form-select",3,"change"],[3,"value","selected",4,"ngFor","ngForOf"],["for","zonegroupName",1,"cd-col-form-label","required"],o,["type","text","placeholder","Zone name...","id","zoneName","name","zoneName","formControlName","zoneName",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"custom-control","custom-checkbox"],["id","default_zone","name","default_zone","formControlName","default_zone","type","checkbox",1,"form-check-input"],["for","default_zone",1,"form-check-label"],i,[4,"ngIf"],["id","master_zone","name","master_zone","formControlName","master_zone","type","checkbox",1,"form-check-input"],["for","master_zone",1,"form-check-label"],s,["for","zone_endpoints",1,"cd-col-form-label","required"],l,["type","text","placeholder","e.g, http://ceph-node-00.com:80","id","zone_endpoints","name","zone_endpoints","formControlName","zone_endpoints",1,"form-control"],["for","access_key",1,"cd-col-form-label","required"],r,["type","text","placeholder","DiPt4V7WWvy2njL1z6aC","id","access_key","name","access_key","formControlName","access_key",1,"form-control"],d,["type","text","placeholder","xSZUdYky0bTctAdCEEW8ikhfBVKsBV5LFYL82vvh","id","secret_key","name","secret_key","formControlName","secret_key",1,"form-control"],["class","form-group 
row",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[3,"value","selected"],[1,"invalid-feedback"],u,R,O,F,b,[3,"href"],h,M,L,S,W,["for","placementTarget",1,"cd-col-form-label"],C,["id","placementTarget","formControlName","placementTarget","name","placementTarget",1,"form-select",3,"change"],["for","placementDataPool",1,"cd-col-form-label"],Z,["id","placementDataPool","formControlName","placementDataPool","name","placementDataPool",1,"form-select",3,"value"],["for","placementIndexPool",1,"cd-col-form-label"],D,["id","placementIndexPool","formControlName","placementIndexPool","name","placementIndexPool",1,"form-select"],["for","placementDataExtraPool",1,"cd-col-form-label"],U,["id","placementDataExtraPool","formControlName","placementDataExtraPool","name","placementDataExtraPool",1,"form-select"],["for","storageClass",1,"cd-col-form-label"],v,["id","storageClass","formControlName","storageClass","name","storageClass",1,"form-select",3,"change"],[3,"value",4,"ngFor","ngForOf"],["for","storageDataPool",1,"cd-col-form-label"],y,["id","storageDataPool","formControlName","storageDataPool","name","storageDataPool",1,"form-select"],["for","storageCompression",1,"cd-col-form-label"],f,["id","storageCompression","formControlName","storageCompression","name","storageCompression",1,"form-select"],[3,"value"]]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10)(13,"select",11),e.NdJ("change",function(s){return o.onZoneGroupChange(s.target.value)}),e.YNc(14,pa,2,3,"option",12),e.qZA()()(),e.TgZ(15,"div",7)(16,"label",13),e.SDv(17,14),e.qZA(),e.TgZ(18,"div",10),e._UZ(19,"input",15),e.YNc(20,ma,2,0,"span",16),e.YNc(21,Ma,2,0,"span",16),e.TgZ(22,"div",17),e._UZ(23,"input",18),e.TgZ(24,"label",19),e.SDv(25,20),e.qZA(),e.YNc(26,Sa,3,0,"span",21),e.YNc(27,Ca,3,0,"span",21),e.YNc(28,Oa,4,1,"cd-helper",21),e._UZ(29,"br"),e.qZA(),e.TgZ(30,"div",17),e._UZ(31,"input",22),e.TgZ(32,"label",23),e.SDv(33,24),e.qZA(),e.YNc(34,Fa,3,0,"span",21),e.YNc(35,Pa,3,0,"span",21),e.YNc(36,Na,4,1,"cd-helper",21),e.qZA()()(),e.TgZ(37,"div",7)(38,"label",25),e.SDv(39,26),e.qZA(),e.TgZ(40,"div",10),e._UZ(41,"input",27),e.YNc(42,Ga,2,0,"span",16),e.YNc(43,Aa,2,0,"span",16),e.qZA()(),e.TgZ(44,"div",7)(45,"label",28),e.tHW(46,29),e.TgZ(47,"cd-helper")(48,"span"),e._UZ(49,"b")(50,"b")(51,"b")(52,"b"),e.qZA()(),e.N_p(),e.qZA(),e.TgZ(53,"div",10),e._UZ(54,"input",30),e.qZA()(),e.TgZ(55,"div",7)(56,"label",28),e.tHW(57,31),e.TgZ(58,"cd-helper")(59,"span"),e._UZ(60,"b")(61,"b")(62,"b")(63,"b"),e.qZA()(),e.N_p(),e.qZA(),e.TgZ(64,"div",10),e._UZ(65,"input",32),e.qZA()(),e.YNc(66,Ua,2,1,"div",33),e.qZA(),e.TgZ(67,"div",34)(68,"cd-form-button-panel",35),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(69,"titlecase"),e.ALo(70,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const 
i=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,21,o.action))(e.lcZ(4,23,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.multisiteZoneForm),e.xp6(7),e.uIk("disabled","edit"===o.action||null),e.xp6(1),e.Q6J("ngForOf",o.zonegroupList),e.xp6(6),e.Q6J("ngIf",o.multisiteZoneForm.showError("zoneName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteZoneForm.showError("zoneName",i,"uniqueName")),e.xp6(2),e.uIk("disabled","edit"===o.action||null),e.xp6(3),e.Q6J("ngIf",o.disableDefault&&"create"===o.action),e.xp6(1),e.Q6J("ngIf",o.isDefaultZone),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&!o.isDefaultZone),e.xp6(3),e.uIk("disabled","edit"===o.action||null),e.xp6(3),e.Q6J("ngIf",o.disableMaster),e.xp6(1),e.Q6J("ngIf",o.isMasterZone),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&!o.isMasterZone),e.xp6(6),e.Q6J("ngIf",o.multisiteZoneForm.showError("zone_endpoints",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteZoneForm.showError("zone_endpoints",i,"endpoint")),e.xp6(23),e.Q6J("ngIf","edit"===o.action),e.xp6(2),e.Q6J("form",o.multisiteZoneForm)("submitText",e.lcZ(69,25,o.action)+" "+e.lcZ(70,27,o.resource))}},dependencies:[T.sg,T.O5,j.S,X.z,B.p,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.Wl,a.EJ,a.JJ,a.JL,a.sg,a.u,T.rS,_e.m]}),t})();var ya=c(36569);function wa(t,n){if(1&t&&(e.TgZ(0,"option",33),e._uU(1),e.qZA()),2&t){const _=n.$implicit,o=e.oxw();e.Q6J("value",_.name)("selected",_.name===o.multisiteZonegroupForm.getValue("selectedRealm")),e.xp6(1),e.hij(" ",_.name," ")}}function xa(t,n){1&t&&(e.TgZ(0,"span",34),e.SDv(1,35),e.qZA())}function ka(t,n){1&t&&(e.TgZ(0,"span",34),e.SDv(1,36),e.qZA())}function za(t,n){1&t&&(e.TgZ(0,"span")(1,"cd-helper"),e.SDv(2,37),e.qZA()())}function qa(t,n){if(1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,38),e._UZ(3,"a",39),e.N_p(),e.qZA()()),2&t){const _=e.oxw();e.xp6(3),e.s9C("href",_.docUrl,e.LSH)}}function Ha(t,n){1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.SDv(2,40),e.qZA()())}function Xa(t,n){1&t&&(e.TgZ(0,"span")(1,"cd-helper"),e.SDv(2,41),e.qZA()())}function Ba(t,n){if(1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.tHW(2,42),e._UZ(3,"a",39),e.N_p(),e.qZA()()),2&t){const _=e.oxw();e.xp6(3),e.s9C("href",_.docUrl,e.LSH)}}function Qa(t,n){1&t&&(e.TgZ(0,"cd-helper")(1,"span"),e.SDv(2,43),e.qZA()())}function Ya(t,n){1&t&&(e.TgZ(0,"span",34),e.SDv(1,44),e.qZA())}function Ja(t,n){1&t&&(e.TgZ(0,"span",34),e.SDv(1,45),e.qZA())}function Ka(t,n){1&t&&(e.TgZ(0,"span",34),e.SDv(1,49),e.qZA())}function Va(t,n){if(1&t&&(e.TgZ(0,"div",7)(1,"label",46),e.SDv(2,47),e.qZA(),e.TgZ(3,"div",10),e._UZ(4,"cd-select-badges",48)(5,"br"),e.YNc(6,Ka,2,0,"span",18),e.qZA()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("data",_.zonegroupZoneNames)("options",_.labelsOption)("customBadges",!0),e.xp6(2),e.Q6J("ngIf",_.isRemoveMasterZone)}}function ja(t,n){1&t&&(e.TgZ(0,"span"),e.SDv(1,68),e.qZA())}function el(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",55)(2,"div",56),e._uU(3),e.ALo(4,"ordinal"),e.TgZ(5,"span",57),e.NdJ("click",function(){const s=e.CHM(_).index,l=e.oxw(2);return e.KtG(l.removePlacementTarget(s))}),e._uU(6,"\xd7"),e.qZA()(),e.TgZ(7,"div",58)(8,"div",7)(9,"label",59),e.SDv(10,60),e.qZA(),e.TgZ(11,"div",10),e._UZ(12,"input",61),e.TgZ(13,"span",34),e.YNc(14,ja,2,0,"span",23),e.qZA()()(),e.TgZ(15,"div",7)(16,"label",62),e.SDv(17,63),e.qZA(),e.TgZ(18,"div",10),e._UZ(19,"input",64),e.qZA()(),e.TgZ(20,"div",7)(21,"label",65),e.SDv(22,66),e.qZA(),e.TgZ(23,"div",10),e._UZ(24,"input",67),e.qZA()()()()()}if(2&t){const 
_=n.$implicit,o=n.index,i=e.oxw(2),s=e.MAs(6);e.xp6(1),e.Q6J("formGroup",_),e.xp6(2),e.hij(" ",e.lcZ(4,3,o+1)," "),e.xp6(11),e.Q6J("ngIf",i.showError(o,"placement_id",s,"required"))}}const _l=function(t){return[t]};function tl(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"legend"),e._uU(2,"Placement targets"),e.qZA(),e.ynx(3,50),e.YNc(4,el,25,5,"div",51),e.BQk(),e.TgZ(5,"button",52),e.NdJ("click",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.addPlacementTarget())}),e._UZ(6,"i",53),e.ynx(7),e.SDv(8,54),e.BQk(),e.qZA()()}if(2&t){const _=e.oxw();e.xp6(4),e.Q6J("ngForOf",_.placementTargets.controls)("ngForTrackBy",_.trackByFn),e.xp6(2),e.Q6J("ngClass",e.VKq(3,_l,_.icons.add))}}let ol=(()=>{class t{constructor(_,o,i,s,l){this.activeModal=_,this.actionLabels=o,this.rgwZonegroupService=i,this.notificationService=s,this.formBuilder=l,this.endpoints=/^((https?:\/\/)|(www.))(?:([a-zA-Z]+)|(\d+\.\d+.\d+.\d+)):\d{2,4}$/,this.ipv4Rgx=/^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/i,this.ipv6Rgx=/^(?:[a-f0-9]{1,4}:){7}[a-f0-9]{1,4}$/i,this.icons=$.P,this.editing=!1,this.defaultsInfo=[],this.multisiteInfo=[],this.realmList=[],this.zonegroupList=[],this.isMaster=!1,this.labelsOption=[],this.zoneList=[],this.isRemoveMasterZone=!1,this.disableDefault=!1,this.disableMaster=!1,this.action=this.editing?this.actionLabels.EDIT+this.resource:this.actionLabels.CREATE+this.resource,this.createForm()}createForm(){this.multisiteZonegroupForm=new V.d({default_zonegroup:new a.p4(!1),zonegroupName:new a.p4(null,{validators:[a.kI.required,m.h.custom("uniqueName",_=>"create"===this.action&&this.zonegroupNames&&-1!==this.zonegroupNames.indexOf(_))]}),master_zonegroup:new a.p4(!1),selectedRealm:new a.p4(null),zonegroup_endpoints:new a.p4(null,[m.h.custom("endpoint",_=>!(E().isEmpty(_)||(_.includes(",")?(_.split(",").forEach(o=>!this.endpoints.test(o)&&!this.ipv4Rgx.test(o)&&!this.ipv6Rgx.test(o)),1):this.endpoints.test(_)||this.ipv4Rgx.test(_)||this.ipv6Rgx.test(_)))),a.kI.required]),placementTargets:this.formBuilder.array([])})}ngOnInit(){E().forEach(this.multisiteZonegroupForm.get("placementTargets"),s=>{this.addPlacementTarget().patchValue(s)}),this.placementTargets=this.multisiteZonegroupForm.get("placementTargets"),this.realmList=void 0!==this.multisiteInfo[0]&&this.multisiteInfo[0].hasOwnProperty("realms")?this.multisiteInfo[0].realms:[],this.zonegroupList=void 0!==this.multisiteInfo[1]&&this.multisiteInfo[1].hasOwnProperty("zonegroups")?this.multisiteInfo[1].zonegroups:[],this.zonegroupList.forEach(s=>{!0===s.is_master&&!E().isEmpty(s.realm_id)&&(this.isMaster=!0,this.disableMaster=!0)}),this.isMaster||(this.multisiteZonegroupForm.get("master_zonegroup").setValue(!0),this.multisiteZonegroupForm.get("master_zonegroup").disable()),this.zoneList=void 0!==this.multisiteInfo[2]&&this.multisiteInfo[2].hasOwnProperty("zones")?this.multisiteInfo[2].zones:[],this.zonegroupNames=this.zonegroupList.map(s=>s.name);const 
i=this.zonegroupList.map(s=>s.zones).reduce((s,l)=>s.concat(l),[]).map(s=>s.name);if(this.allZoneNames=this.zoneList.map(s=>s.name),this.allZoneNames=E().difference(this.allZoneNames,i),"create"===this.action&&null!==this.defaultsInfo.defaultRealmName&&(this.multisiteZonegroupForm.get("selectedRealm").setValue(this.defaultsInfo.defaultRealmName),this.disableMaster&&this.multisiteZonegroupForm.get("master_zonegroup").disable()),"edit"===this.action){this.multisiteZonegroupForm.get("zonegroupName").setValue(this.info.data.name),this.multisiteZonegroupForm.get("selectedRealm").setValue(this.info.data.parent),this.multisiteZonegroupForm.get("default_zonegroup").setValue(this.info.data.is_default),this.multisiteZonegroupForm.get("master_zonegroup").setValue(this.info.data.is_master),this.multisiteZonegroupForm.get("zonegroup_endpoints").setValue(this.info.data.endpoints),this.info.data.is_default&&this.multisiteZonegroupForm.get("default_zonegroup").disable(),!this.info.data.is_default&&this.multisiteZonegroupForm.getValue("selectedRealm")!==this.defaultsInfo.defaultRealmName&&(this.multisiteZonegroupForm.get("default_zonegroup").disable(),this.disableDefault=!0),(this.info.data.is_master||this.disableMaster)&&this.multisiteZonegroupForm.get("master_zonegroup").disable(),this.zonegroupZoneNames=this.info.data.zones.map(l=>l.name),this.zgZoneNames=this.info.data.zones.map(l=>l.name),this.zgZoneIds=this.info.data.zones.map(l=>l.id);const s=new Set(this.allZoneNames);this.labelsOption=Array.from(s).map(l=>({enabled:!0,name:l,selected:!1,description:null})),this.info.data.placement_targets.forEach(l=>{const r=this.addPlacementTarget();let d={placement_id:l.name,tags:l.tags.join(","),storage_class:"string"==typeof l.storage_classes?l.storage_classes:l.storage_classes.join(",")};r.patchValue(d)})}}submit(){const _=this.multisiteZonegroupForm.getRawValue();if("create"===this.action)this.realm=new x.L6,this.realm.name=_.selectedRealm,this.zonegroup=new x.iG,this.zonegroup.name=_.zonegroupName,this.zonegroup.endpoints=_.zonegroup_endpoints,this.rgwZonegroupService.create(this.realm,this.zonegroup,_.default_zonegroup,_.master_zonegroup).subscribe(()=>{this.notificationService.show(w.k.success,"Zonegroup: '" + _.zonegroupName + "' created successfully"),this.activeModal.close()},()=>{this.multisiteZonegroupForm.setErrors({cdSubmitButton:!0})});else if("edit"===this.action){this.removedZones=E().difference(this.zgZoneNames,this.zonegroupZoneNames);const o=this.info.data.zones.filter(i=>i.id===this.info.data.master_zone);if(this.isRemoveMasterZone=this.removedZones.includes(o[0].name),this.isRemoveMasterZone)return void this.multisiteZonegroupForm.setErrors({cdSubmitButton:!0});this.addedZones=E().difference(this.zonegroupZoneNames,this.zgZoneNames),this.realm=new x.L6,this.realm.name=_.selectedRealm,this.zonegroup=new x.iG,this.zonegroup.name=this.info.data.name,this.newZonegroupName=_.zonegroupName,this.zonegroup.endpoints=_.zonegroup_endpoints.toString(),this.zonegroup.placement_targets=_.placementTargets,this.rgwZonegroupService.update(this.realm,this.zonegroup,this.newZonegroupName,_.default_zonegroup,_.master_zonegroup,this.removedZones,this.addedZones).subscribe(()=>{this.notificationService.show(w.k.success,"Zonegroup: '" + _.zonegroupName + "' updated successfully"),this.activeModal.close()},()=>{this.multisiteZonegroupForm.setErrors({cdSubmitButton:!0})})}}addPlacementTarget(){this.placementTargets=this.multisiteZonegroupForm.get("placementTargets");const _=new V.d({placement_id:new 
a.p4("",{validators:[a.kI.required]}),tags:new a.p4(""),storage_class:new a.p4([])});return this.placementTargets.push(_),_}trackByFn(_){return _}removePlacementTarget(_){this.placementTargets=this.multisiteZonegroupForm.get("placementTargets"),this.placementTargets.removeAt(_)}showError(_,o,i,s){return this.multisiteZonegroupForm.controls.placementTargets.controls[_].showError(o,i,s)}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(G.Kz),e.Y36(I.p4),e.Y36(ge.K),e.Y36(Y.g),e.Y36(a.QS))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-multisite-zonegroup-form"]],decls:49,vars:24,consts:function(){let n,_,o,i,s,l,r,d,u,R,O,F,b,h,M,L,S,W,C,Z,D,U,v,y;return n="" + "\ufffd0\ufffd" + " Zone Group",_="Select Realm",o="-- Select a realm --",i="Zone Group Name",s="Default",l="Master",r="Endpoints",d="This field is required.",u="The chosen zone group name is already in use.",R="Zone group doesn't belong to the default realm.",O="Please consult the " + "\ufffd#3\ufffd" + "documentation" + "\ufffd/#3\ufffd" + " to follow the failover mechanism",F="You cannot unset the default flag.",b="Multiple master zone groups can't be configured. If you want to create a new zone group and make it the master zone group, you must delete the default zone group.",h="Please consult the " + "\ufffd#3\ufffd" + "documentation" + "\ufffd/#3\ufffd" + " to follow the failover mechanism",M="You cannot unset the master flag.",L="This field is required.",S="Please enter a valid IP address.",W="Zones",C="Cannot remove master zone.",Z="Add placement target",D="Placement Id",U="Tags",v="Storage Class",y="This field is required.",[[3,"modalRef"],[1,"modal-title"],n,[1,"modal-content"],["name","multisiteZonegroupForm","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","selectedRealm",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["id","selectedRealm","formControlName","selectedRealm","name","selectedRealm",1,"form-select"],["ngValue",""],o,[3,"value","selected",4,"ngFor","ngForOf"],["for","zonegroupName",1,"cd-col-form-label","required"],i,["type","text","placeholder","Zone group name...","id","zonegroupName","name","zonegroupName","formControlName","zonegroupName",1,"form-control"],["class","invalid-feedback",4,"ngIf"],[1,"custom-control","custom-checkbox"],["id","default_zonegroup","name","default_zonegroup","formControlName","default_zonegroup","type","checkbox",1,"form-check-input"],["for","default_zonegroup",1,"form-check-label"],s,[4,"ngIf"],["id","master_zonegroup","name","master_zonegroup","formControlName","master_zonegroup","type","checkbox",1,"form-check-input"],["for","master_zonegroup",1,"form-check-label"],l,["for","zonegroup_endpoints",1,"cd-col-form-label","required"],r,["type","text","placeholder","e.g, http://ceph-node-00.com:80","id","zonegroup_endpoints","name","zonegroup_endpoints","formControlName","zonegroup_endpoints",1,"form-control"],["class","form-group 
row",4,"ngIf"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[3,"value","selected"],[1,"invalid-feedback"],d,u,R,O,[3,"href"],F,b,h,M,L,S,["for","zones",1,"cd-col-form-label"],W,["id","zones",3,"data","options","customBadges"],C,["formArrayName","placementTargets"],[4,"ngFor","ngForOf","ngForTrackBy"],["type","button","id","add-plc",1,"btn","btn-light","float-end","my-3",3,"click"],[3,"ngClass"],Z,[1,"card",3,"formGroup"],[1,"card-header"],["name","remove_placement_target","ngbTooltip","Remove",1,"float-end","clickable",3,"click"],[1,"card-body"],["for","placement_id",1,"cd-col-form-label","required"],D,["type","text","name","placement_id","id","placement_id","formControlName","placement_id","placeholder","eg. default-placement",1,"form-control"],["for","tags",1,"cd-col-form-label"],U,["type","text","name","tags","id","tags","formControlName","tags","placeholder","comma separated tags, eg. default-placement, ssd",1,"form-control"],["for","storage_class",1,"cd-col-form-label"],v,["type","text","name","storage_class","id","storage_class","formControlName","storage_class","placeholder","eg. Standard-tier",1,"form-control"],y]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.BQk(),e.ynx(4,3),e.TgZ(5,"form",4,5)(7,"div",6)(8,"div",7)(9,"label",8),e.SDv(10,9),e.qZA(),e.TgZ(11,"div",10)(12,"select",11)(13,"option",12),e.SDv(14,13),e.qZA(),e.YNc(15,wa,2,3,"option",14),e.qZA()()(),e.TgZ(16,"div",7)(17,"label",15),e.SDv(18,16),e.qZA(),e.TgZ(19,"div",10),e._UZ(20,"input",17),e.YNc(21,xa,2,0,"span",18),e.YNc(22,ka,2,0,"span",18),e.TgZ(23,"div",19),e._UZ(24,"input",20),e.TgZ(25,"label",21),e.SDv(26,22),e.qZA(),e.YNc(27,za,3,0,"span",23),e.YNc(28,qa,4,1,"cd-helper",23),e.YNc(29,Ha,3,0,"cd-helper",23),e._UZ(30,"br")(31,"input",24),e.TgZ(32,"label",25),e.SDv(33,26),e.qZA(),e.YNc(34,Xa,3,0,"span",23),e.YNc(35,Ba,4,1,"cd-helper",23),e.YNc(36,Qa,3,0,"cd-helper",23),e.qZA()()(),e.TgZ(37,"div",7)(38,"label",27),e.SDv(39,28),e.qZA(),e.TgZ(40,"div",10),e._UZ(41,"input",29),e.YNc(42,Ya,2,0,"span",18),e.YNc(43,Ja,2,0,"span",18),e.qZA()(),e.YNc(44,Va,7,4,"div",30),e.YNc(45,tl,9,5,"div",23),e.qZA(),e.TgZ(46,"div",31)(47,"cd-form-button-panel",32),e.NdJ("submitActionEvent",function(){return o.submit()}),e.ALo(48,"titlecase"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const i=e.MAs(6);e.Q6J("modalRef",o.activeModal),e.xp6(3),e.pQV(e.lcZ(3,20,o.action)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.multisiteZonegroupForm),e.xp6(10),e.Q6J("ngForOf",o.realmList),e.xp6(6),e.Q6J("ngIf",o.multisiteZonegroupForm.showError("zonegroupName",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteZonegroupForm.showError("zonegroupName",i,"uniqueName")),e.xp6(2),e.uIk("disabled","edit"===o.action||null),e.xp6(3),e.Q6J("ngIf",o.disableDefault&&"create"===o.action),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&!o.info.data.is_default),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&o.info.data.is_default),e.xp6(2),e.uIk("disabled","edit"===o.action||null),e.xp6(3),e.Q6J("ngIf",o.disableMaster&&"create"===o.action),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&!o.info.data.is_master),e.xp6(1),e.Q6J("ngIf","edit"===o.action&&o.info.data.is_master),e.xp6(6),e.Q6J("ngIf",o.multisiteZonegroupForm.showError("zonegroup_endpoints",i,"required")),e.xp6(1),e.Q6J("ngIf",o.multisiteZonegroupForm.showError("zonegroup_endpoints",i,"endpoint")),e.xp6(1),e.Q6J("ngIf","edit"===o.action),e.xp6(1),e.Q6J("ngIf","edit"===o.action),e.xp6(2),e.Q6J("form",o.multisiteZonegroupForm)("submitText",e.lcZ(48,22,o.action)+" Zone 
Group")}},dependencies:[T.mk,T.sg,T.O5,j.S,I_.m,X.z,B.p,k.o,q.b,z.P,H.V,a._Y,a.YN,a.Kr,a.Fj,a.Wl,a.EJ,a.JJ,a.JL,a.sg,a.u,a.CE,G._L,T.rS,ya.f]}),t})();var nl=c(61717),il=c(36848),sl=c(7273);const al=["tree"];function ll(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"cd-alert-panel",17),e.tHW(1,18),e.TgZ(2,"a",19),e.NdJ("click",function(){e.CHM(_);const i=e.oxw();return e.KtG(i.enableRgwModule())}),e.qZA(),e.N_p(),e.qZA()}}function rl(t,n){1&t&&(e.TgZ(0,"cd-alert-panel",20),e.tHW(1,21),e._UZ(2,"a",22),e.N_p(),e.qZA())}function cl(t,n){if(1&t&&(e.TgZ(0,"span"),e._UZ(1,"cd-table-actions",23),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("permission",_.permission)("btnColor","light")("selection",_.selection)("tableActions",_.migrateTableAction)}}const dl=function(t,n,_){return[t,n,_]};function ul(t,n){if(1&t&&e._UZ(0,"i",24),2&t){const _=e.oxw();e.Q6J("ngClass",e.kEZ(1,dl,_.icons.large,_.icons.spinner,_.icons.spin))}}function gl(t,n){if(1&t&&(e.TgZ(0,"span"),e._UZ(1,"i",30),e.qZA()),2&t){const _=e.oxw(2).$implicit,o=e.oxw();e.xp6(1),e.Q6J("title",_.data.warning_message)("ngClass",o.icons.danger)}}function Rl(t,n){if(1&t&&(e.TgZ(0,"span",29),e.YNc(1,gl,2,2,"span",5),e._UZ(2,"i",24),e._uU(3),e.qZA()),2&t){const _=e.oxw().$implicit;e.xp6(1),e.Q6J("ngIf",_.data.show_warning),e.xp6(1),e.Q6J("ngClass",_.data.icon),e.xp6(1),e.hij(" ",_.data.name," ")}}function Tl(t,n){1&t&&(e.TgZ(0,"span",31),e._uU(1," default "),e.qZA())}function El(t,n){1&t&&(e.TgZ(0,"span",32),e._uU(1," master "),e.qZA())}function fl(t,n){1&t&&(e.TgZ(0,"span",32),e._uU(1," secondary-zone "),e.qZA())}const b_=function(t){return[t]};function pl(t,n){if(1&t){const _=e.EpF();e.TgZ(0,"div",33)(1,"div",34)(2,"button",35),e.NdJ("click",function(){e.CHM(_);const i=e.oxw().$implicit,s=e.oxw();return e.KtG(s.openModal(i,!0))}),e._UZ(3,"i",24),e.qZA()(),e.TgZ(4,"div",34)(5,"button",36),e.NdJ("click",function(){e.CHM(_);const i=e.oxw().$implicit,s=e.oxw();return e.KtG(s.delete(i))}),e._UZ(6,"i",24),e.qZA()()()}if(2&t){const _=e.oxw().$implicit,o=e.oxw();e.xp6(1),e.Q6J("title",o.editTitle),e.xp6(1),e.Q6J("disabled",o.getDisable()||_.data.secondary_zone),e.xp6(1),e.Q6J("ngClass",e.VKq(6,b_,o.icons.edit)),e.xp6(1),e.Q6J("title",o.deleteTitle),e.xp6(1),e.Q6J("disabled",o.isDeleteDisabled(_)||_.data.secondary_zone),e.xp6(1),e.Q6J("ngClass",e.VKq(8,b_,o.icons.destroy))}}function ml(t,n){if(1&t&&(e.YNc(0,Rl,4,3,"span",25),e.YNc(1,Tl,2,0,"span",26),e.YNc(2,El,2,0,"span",27),e.YNc(3,fl,2,0,"span",27),e.YNc(4,pl,7,10,"div",28)),2&t){const _=n.$implicit;e.Q6J("ngIf",_.data.name),e.xp6(1),e.Q6J("ngIf",_.data.is_default),e.xp6(1),e.Q6J("ngIf",_.data.is_master),e.xp6(1),e.Q6J("ngIf",_.data.secondary_zone),e.xp6(1),e.Q6J("ngIf",_.isFocused)}}function Ml(t,n){if(1&t&&(e.TgZ(0,"div",37)(1,"legend"),e._uU(2),e.qZA(),e.TgZ(3,"div"),e._UZ(4,"cd-table-key-value",38),e.qZA()()),2&t){const _=e.oxw();e.xp6(2),e.Oqu(_.metadataTitle),e.xp6(2),e.Q6J("data",_.metadata)}}class Te{constructor(n,_,o,i,s,l,r,d,u,R,O,F){this.modalService=n,this.timerService=_,this.authStorageService=o,this.actionLabels=i,this.timerServiceVariable=s,this.router=l,this.rgwRealmService=r,this.rgwZonegroupService=d,this.rgwZoneService=u,this.rgwDaemonService=R,this.mgrModuleService=O,this.notificationService=F,this.sub=new N_.w,this.messages={noDefaultRealm:"Please create a default realm first to enable this feature",noMasterZone:"Please create a master zone for each zone group to enable this feature",noRealmExists:"No realm exists",disableExport:"Please create master zone group and master zone for each of 
the realms"},this.icons=$.P,this.selection=new qe.r,this.loadingIndicator=!0,this.nodes=[],this.treeOptions={useVirtualScroll:!0,nodeHeight:22,levelPadding:20,actionMapping:{mouse:{click:this.onNodeSelected.bind(this)}}},this.realms=[],this.zonegroups=[],this.zones=[],this.realmIds=[],this.zoneIds=[],this.defaultRealmId="",this.defaultZonegroupId="",this.defaultZoneId="",this.multisiteInfo=[],this.defaultsInfo=[],this.showMigrateAction=!1,this.editTitle="Edit",this.deleteTitle="Delete",this.disableExport=!0,this.restartGatewayMessage=!1,this.rgwModuleData=[],this.permission=this.authStorageService.getPermissions().rgw}openModal(n,_=!1){const o=_?n.data.type:n;this.bsModalRef=this.modalService.show("realm"===o?fa:"zonegroup"===o?ol:va,{resource:o,action:_?"edit":"create",info:n,defaultsInfo:this.defaultsInfo,multisiteInfo:this.multisiteInfo},{size:"lg"})}openMigrateModal(){this.bsModalRef=this.modalService.show(ms,{multisiteInfo:this.multisiteInfo},{size:"lg"})}openImportModal(){this.bsModalRef=this.modalService.show(ca,{multisiteInfo:this.multisiteInfo},{size:"lg"})}openExportModal(){this.bsModalRef=this.modalService.show(ws,{defaultsInfo:this.defaultsInfo,multisiteInfo:this.multisiteInfo},{size:"lg"})}getDisableExport(){return this.realms.forEach(n=>{this.zonegroups.forEach(_=>{n.id===_.realm_id&&_.is_master&&""!==_.master_zone&&(this.disableExport=!1)})}),!this.rgwModuleStatus||(this.realms.length<1?this.messages.noRealmExists:!!this.disableExport&&this.messages.disableExport)}getDisableImport(){return!this.rgwModuleStatus}ngOnInit(){const i={permission:"read",icon:$.P.exchange,name:this.actionLabels.MIGRATE,click:()=>this.openMigrateModal()},s={permission:"read",icon:$.P.download,name:this.actionLabels.IMPORT,click:()=>this.openImportModal(),disable:()=>this.getDisableImport()},l={permission:"read",icon:$.P.upload,name:this.actionLabels.EXPORT,click:()=>this.openExportModal(),disable:()=>this.getDisableExport()};this.createTableActions=[{permission:"create",icon:$.P.add,name:this.actionLabels.CREATE+" Realm",click:()=>this.openModal("realm")},{permission:"create",icon:$.P.add,name:this.actionLabels.CREATE+" Zone Group",click:()=>this.openModal("zonegroup"),disable:()=>this.getDisable()},{permission:"create",icon:$.P.add,name:this.actionLabels.CREATE+" Zone",click:()=>this.openModal("zone")}],this.migrateTableAction=[i],this.importAction=[s],this.exportAction=[l];const r=[this.rgwRealmService.getAllRealmsInfo(),this.rgwZonegroupService.getAllZonegroupsInfo(),this.rgwZoneService.getAllZonesInfo()];this.sub=this.timerService.get(()=>(0,ne.D)(r),2*this.timerServiceVariable.TIMER_SERVICE_PERIOD).subscribe(d=>{this.multisiteInfo=d,this.loadingIndicator=!1,this.nodes=this.abstractTreeData(d)},d=>{}),this.mgrModuleService.list().subscribe(d=>{this.rgwModuleData=d.filter(u=>"rgw"===u.name),this.rgwModuleData.length>0&&(this.rgwModuleStatus=this.rgwModuleData[0].enabled)})}ngOnDestroy(){this.sub.unsubscribe()}abstractTreeData(n){let _=[],o={},i={},s=[],l={},r=[];if(this.realms=n[0].realms,this.zonegroups=n[1].zonegroups,this.zones=n[2].zones,this.defaultRealmId=n[0].default_realm,this.defaultZonegroupId=n[1].default_zonegroup,this.defaultZoneId=n[2].default_zone,this.defaultsInfo=this.getDefaultsEntities(this.defaultRealmId,this.defaultZonegroupId,this.defaultZoneId),this.realms.length>0)for(const d of this.realms){const u=this.rgwRealmService.getRealmTree(d,this.defaultRealmId);o=u.nodes,this.realmIds=this.realmIds.concat(u.realmIds);for(const R of 
this.zonegroups)if(R.realm_id===d.id){i=this.rgwZonegroupService.getZonegroupTree(R,this.defaultZonegroupId,d);for(const O of R.zones){const F=this.rgwZoneService.getZoneTree(O,this.defaultZoneId,this.zones,R,d);l=F.nodes,this.zoneIds=this.zoneIds.concat(F.zoneIds),r.push(l),l={}}i.children=r,r=[],s.push(i),i={}}o.children=s,_.push(o),i={},l={},o={},s=[],r=[]}if(this.zonegroups.length>0)for(const d of this.zonegroups)if(!this.realmIds.includes(d.realm_id)){o=this.rgwZonegroupService.getZonegroupTree(d,this.defaultZonegroupId);for(const u of d.zones){const R=this.rgwZoneService.getZoneTree(u,this.defaultZoneId,this.zones,d);i=R.nodes,this.zoneIds=this.zoneIds.concat(R.zoneIds),s.push(i),i={}}o.children=s,_.push(o),i={},o={},s=[]}if(this.zones.length>0)for(const d of this.zones)this.zoneIds.length>0&&!this.zoneIds.includes(d.id)&&(o=this.rgwZoneService.getZoneTree(d,this.defaultZoneId,this.zones).nodes,_.push(o),o={});return this.realms.length<1&&this.zonegroups.length<1&&this.zones.length<1?[{name:"No nodes!"}]:(this.realmIds=[],this.zoneIds=[],this.getDisableMigrate(),this.rgwDaemonService.list().subscribe(d=>{const u=d.map(R=>R.realm_name);""!=this.defaultRealmId&&""!=this.defaultZonegroupId&&""!=this.defaultZoneId&&u.includes("")&&(this.restartGatewayMessage=!0)}),_)}getDefaultsEntities(n,_,o){const i=this.realms.find(R=>R.id===n),s=this.zonegroups.find(R=>R.id===_),l=this.zones.find(R=>R.id===o);return{defaultRealmName:void 0!==i?i.name:null,defaultZonegroupName:void 0!==s?s.name:null,defaultZoneName:void 0!==l?l.name:null}}onNodeSelected(n,_){je.iM.ACTIVATE(n,_,!0),this.metadataTitle=_.data.name,this.metadata=_.data.info,_.data.show=!0}onUpdateData(){this.tree.treeModel.expandAll()}getDisable(){let n=!0;return""===this.defaultRealmId?this.messages.noDefaultRealm:(this.zonegroups.forEach(_=>{E().isEmpty(_.master_zone)&&(n=!1)}),n?(this.editTitle="Edit",!1):(this.editTitle="Please create a master zone for each existing zonegroup to enable this feature",this.messages.noMasterZone))}getDisableMigrate(){return this.showMigrateAction=0===this.realms.length&&1===this.zonegroups.length&&"default"===this.zonegroups[0].name&&1===this.zones.length&&"default"===this.zones[0].name,this.showMigrateAction}isDeleteDisabled(n){let _=!1,o=0;if("realm"===n.data.type&&n.data.is_default&&this.realms.length<2&&(_=!0),"zonegroup"===n.data.type)if(this.zonegroups.length<2)this.deleteTitle="You can not delete the only zonegroup available",_=!0;else if(n.data.is_default)this.deleteTitle="You can not delete the default zonegroup",_=!0;else if(n.data.is_master){for(let i of this.zonegroups)if(!0===i.is_master&&(o++,o>1))break;o<2&&(this.deleteTitle="You can not delete the only master zonegroup available",_=!0)}return"zone"===n.data.type&&(this.zones.length<2?(this.deleteTitle="You can not delete the only zone available",_=!0):n.data.is_default?(this.deleteTitle="You can not delete the default zone",_=!0):n.data.is_master&&n.data.zone_zonegroup.zones.length<2&&(this.deleteTitle="You can not delete the master zone as there are no more zones in this zonegroup",_=!0)),_||(this.deleteTitle="Delete"),_}delete(n){"realm"===n.data.type?this.modalRef=this.modalService.show(Qe.M,{itemDescription:"" + n.data.type + " " + n.data.name + "",itemNames:[`${n.data.name}`],submitAction:()=>{this.rgwRealmService.delete(n.data.name).subscribe(()=>{this.modalRef.close(),this.notificationService.show(w.k.success,"Realm: '" + n.data.name + "' deleted 
successfully")},()=>{this.modalRef.componentInstance.stopLoadingSpinner()})}}):"zonegroup"===n.data.type?this.modalRef=this.modalService.show(Ls,{zonegroup:n.data}):"zone"===n.data.type&&(this.modalRef=this.modalService.show(Os,{zone:n.data}))}enableRgwModule(){let n;const _=()=>{(0,ls.H)(2e3).subscribe(()=>{this.mgrModuleService.list().subscribe(()=>{this.notificationService.suspendToasties(!1),this.blockUI.stop(),this.notificationService.show(w.k.success,"Enabled RGW Module"),this.router.navigateByUrl("/",{skipLocationChange:!0}).then(()=>{this.router.navigate(["/rgw/multisite"])})},()=>{_()})})};this.rgwModuleStatus||(n=this.mgrModuleService.enable("rgw")),n.subscribe(()=>{},()=>{this.notificationService.suspendToasties(!0),this.blockUI.start("Reconnecting, please wait ..."),_()})}}Te.\u0275fac=function(n){return new(n||Te)(e.Y36(ee.Z),e.Y36(il.f),e.Y36(ce.j),e.Y36(I.p4),e.Y36(I.eu),e.Y36(J.F0),e.Y36(de.y),e.Y36(ge.K),e.Y36(ue.g),e.Y36(oe.b),e.Y36(sl.N),e.Y36(Y.g))},Te.\u0275cmp=e.Xpm({type:Te,selectors:[["cd-rgw-multisite-details"]],viewQuery:function(n,_){if(1&n&&e.Gf(al,5),2&n){let o;e.iGM(o=e.CRH())&&(_.tree=o.first)}},decls:21,vars:18,consts:function(){let t,n,_;return t="Topology Viewer",n="In order to access the import/export feature, the rgw module must be enabled " + "\ufffd#2\ufffd" + " Enable the Object Gateway Module" + "\ufffd/#2\ufffd" + "",_="Please restart all Ceph Object Gateway instances in all zones to ensure consistent multisite configuration updates. " + "\ufffd#2\ufffd" + " Cluster->Services" + "\ufffd/#2\ufffd" + "",[[1,"row"],[1,"col-sm-12","col-lg-12"],["type","info","spacingClass","mb-3",4,"ngIf"],["type","warning","spacingClass","mb-3",4,"ngIf"],[1,"btn-group","mb-4","me-2",3,"permission","selection","tableActions"],[4,"ngIf"],[1,"btn-group","mb-4","me-2",3,"permission","btnColor","selection","tableActions"],[1,"card"],[1,"card-header"],t,[1,"card-body"],[1,"col-sm-6","col-lg-6","tree-container"],[3,"ngClass",4,"ngIf"],[3,"nodes","options","updateData"],["tree",""],["treeNodeTemplate",""],["class","col-sm-6 col-lg-6 metadata",4,"ngIf"],["type","info","spacingClass","mb-3"],n,[1,"text-decoration-underline",3,"click"],["type","warning","spacingClass","mb-3"],_,["routerLink","/services",1,"text-decoration-underline"],[1,"btn-group","mb-4","me-2","secondary",3,"permission","btnColor","selection","tableActions"],[3,"ngClass"],["class","me-3",4,"ngIf"],["class","badge badge-success me-2",4,"ngIf"],["class","badge badge-warning me-2",4,"ngIf"],["class","btn-group align-inline-btns","role","group",4,"ngIf"],[1,"me-3"],[1,"text-danger",3,"title","ngClass"],[1,"badge","badge-success","me-2"],[1,"badge","badge-warning","me-2"],["role","group",1,"btn-group","align-inline-btns"],[3,"title"],["type","button",1,"btn","btn-light","dropdown-toggle-split","ms-1",3,"disabled","click"],["type","button",1,"btn","btn-light","ms-1",3,"disabled","click"],[1,"col-sm-6","col-lg-6","metadata"],["cdTableDetail","",3,"data"]]},template:function(n,_){1&n&&(e.TgZ(0,"div",0)(1,"div",1)(2,"div"),e.YNc(3,ll,3,0,"cd-alert-panel",2),e.YNc(4,rl,3,0,"cd-alert-panel",3),e._UZ(5,"cd-table-actions",4),e.YNc(6,cl,2,4,"span",5),e._UZ(7,"cd-table-actions",6)(8,"cd-table-actions",6),e.qZA(),e.TgZ(9,"div",7)(10,"div",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10)(13,"div",0)(14,"div",11),e.YNc(15,ul,1,5,"i",12),e.TgZ(16,"tree-root",13,14),e.NdJ("updateData",function(){return 
_.onUpdateData()}),e.YNc(18,ml,5,5,"ng-template",null,15,e.W1O),e.qZA()(),e.YNc(20,Ml,5,2,"div",16),e.qZA()()()()()),2&n&&(e.xp6(3),e.Q6J("ngIf",!_.rgwModuleStatus),e.xp6(1),e.Q6J("ngIf",_.restartGatewayMessage),e.xp6(1),e.Q6J("permission",_.permission)("selection",_.selection)("tableActions",_.createTableActions),e.xp6(1),e.Q6J("ngIf",_.showMigrateAction),e.xp6(1),e.Q6J("permission",_.permission)("btnColor","light")("selection",_.selection)("tableActions",_.importAction),e.xp6(1),e.Q6J("permission",_.permission)("btnColor","light")("selection",_.selection)("tableActions",_.exportAction),e.xp6(7),e.Q6J("ngIf",_.loadingIndicator),e.xp6(1),e.Q6J("nodes",_.nodes)("options",_.treeOptions),e.xp6(4),e.Q6J("ngIf",_.metadata))},dependencies:[T.mk,T.O5,Re.G,f_.b,Je.K,k.o,J.rH,je.qr],styles:[".tree-container[_ngcontent-%COMP%]{height:calc(100vh - 200px)}.align-inline-btns[_ngcontent-%COMP%]{margin-left:5em}.btn[_ngcontent-%COMP%]:disabled{pointer-events:none}"]}),(0,xe.gn)([(0,nl.bH)(),(0,xe.w6)("design:type",Object)],Te.prototype,"blockUI",void 0);var Sl=c(40267),Cl=c(7357),Ol=c(11656),Fl=c(4167),Pl=c(43190),Nl=c(68307),Gl=c(47349),Al=c(79241),Il=c(98677),bl=c(30490),hl=c(9219),Ll=c(17401),Wl=c(9024),$l=c(54740);const me=function(t,n){return[t,n]};let Zl=(()=>{class t{constructor(){this.icons=$.P}}return t.\u0275fac=function(_){return new(_||t)},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-sync-primary-zone"]],inputs:{realm:"realm",zonegroup:"zonegroup",zone:"zone"},decls:17,vars:23,consts:[[1,"pb-5"],[1,"pt-2",3,"ngClass"],[1,"badge","badge-info","mt-2"],[1,"mt-2",3,"ngClass"]],template:function(_,o){1&_&&(e.TgZ(0,"ul",0)(1,"li"),e._UZ(2,"i",1),e.qZA(),e.TgZ(3,"li",2),e._uU(4),e.qZA(),e.TgZ(5,"li"),e._UZ(6,"i",3),e.qZA(),e.TgZ(7,"li"),e._UZ(8,"i",3),e.qZA(),e.TgZ(9,"p",2),e._uU(10),e.qZA(),e.TgZ(11,"li"),e._UZ(12,"i",3),e.qZA(),e.TgZ(13,"li"),e._UZ(14,"i",3),e.qZA(),e.TgZ(15,"li",2),e._uU(16),e.qZA()()),2&_&&(e.xp6(2),e.Q6J("ngClass",e.WLB(8,me,o.icons.large2x,o.icons.reweight)),e.xp6(2),e.Oqu(o.realm),e.xp6(2),e.Q6J("ngClass",e.WLB(11,me,o.icons.large2x,o.icons.down)),e.xp6(2),e.Q6J("ngClass",e.WLB(14,me,o.icons.large2x,o.icons.cubes)),e.xp6(2),e.Oqu(o.zonegroup),e.xp6(2),e.Q6J("ngClass",e.WLB(17,me,o.icons.large2x,o.icons.down)),e.xp6(2),e.Q6J("ngClass",e.WLB(20,me,o.icons.large2x,o.icons.deploy)),e.xp6(2),e.Oqu(o.zone))},dependencies:[T.mk],styles:["ul[_ngcontent-%COMP%]{align-items:center;display:flex;flex-direction:column;list-style-type:none}.align-primary-zone[_ngcontent-%COMP%]{padding-left:4em}"]}),t})();var h_=c(90068);function Dl(t,n){1&t&&(e.TgZ(0,"span")(1,"ul",1)(2,"li")(3,"b"),e._uU(4,"Status:"),e.qZA()(),e.TgZ(5,"li"),e._uU(6,"No Sync"),e.qZA()()())}function Ul(t,n){if(1&t&&(e.TgZ(0,"span")(1,"b"),e._uU(2),e.ALo(3,"titlecase"),e.qZA(),e._uU(4),e.ALo(5,"titlecase"),e.qZA()),2&t){const _=e.oxw(2).$implicit;e.xp6(2),e.Oqu(e.lcZ(3,2,_.split(":")[0])),e.xp6(2),e.hij(":",e.lcZ(5,4,_.split(":")[1])," ")}}function vl(t,n){if(1&t&&(e.TgZ(0,"span")(1,"b"),e._uU(2),e.ALo(3,"titlecase"),e.qZA()()),2&t){const _=e.oxw(2).$implicit;e.xp6(2),e.Oqu(e.lcZ(3,1,_))}}function yl(t,n){if(1&t&&(e.TgZ(0,"span"),e.YNc(1,Ul,6,6,"span",0),e.YNc(2,vl,4,3,"span",0),e.qZA()),2&t){const _=e.oxw().$implicit;e.xp6(1),e.Q6J("ngIf",null==_?null:_.includes(":")),e.xp6(1),e.Q6J("ngIf",!(null!=_&&_.includes(":")))}}function wl(t,n){if(1&t&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"titlecase"),e.qZA()),2&t){const _=e.oxw().$implicit;e.xp6(1),e.hij(" ",e.lcZ(2,1,_)," ")}}function 
xl(t,n){if(1&t&&(e.TgZ(0,"li"),e.YNc(1,yl,3,2,"span",0),e.YNc(2,wl,3,3,"span",0),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(3);e.xp6(1),e.Q6J("ngIf",!(null!=_&&_.includes(o.metadataSyncInfo.syncstatus)||null!=_&&_.includes("failed")||null!=_&&_.includes("error"))),e.xp6(1),e.Q6J("ngIf",(null==_?null:_.includes("failed"))||(null==_?null:_.includes("error")))}}function kl(t,n){if(1&t&&(e.TgZ(0,"ul",8)(1,"li")(2,"h5")(3,"b"),e._uU(4,"Metadata Sync Status:"),e.qZA()()(),e.YNc(5,xl,3,2,"li",9),e.qZA()),2&t){const _=e.oxw(2);e.xp6(5),e.Q6J("ngForOf",_.metadataSyncInfo.fullSyncStatus)}}function zl(t,n){1&t&&(e.TgZ(0,"li",10),e._uU(1,"Up to Date"),e.qZA())}function ql(t,n){if(1&t&&(e.TgZ(0,"a",12),e.SDv(1,13),e.ALo(2,"titlecase"),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.Q6J("ngbPopover",_),e.xp6(2),e.pQV(e.lcZ(2,2,o.metadataSyncInfo.syncstatus)),e.QtT(1)}}function Hl(t,n){if(1&t&&(e.TgZ(0,"a",12),e.SDv(1,14),e.qZA()),2&t){e.oxw(2);const _=e.MAs(2);e.Q6J("ngbPopover",_)}}function Xl(t,n){if(1&t&&(e.YNc(0,ql,3,4,"a",11),e.YNc(1,Hl,2,1,"a",11)),2&t){const _=e.oxw(2);e.Q6J("ngIf","Not Syncing From Zone"!==_.metadataSyncInfo.syncstatus),e.xp6(1),e.Q6J("ngIf","Not Syncing From Zone"===_.metadataSyncInfo.syncstatus)}}const Bl=function(t){return[t]};function Ql(t,n){if(1&t&&(e.TgZ(0,"li"),e._UZ(1,"i",15),e.TgZ(2,"a",16),e.SDv(3,17),e.qZA()()),2&t){e.oxw();const _=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("ngClass",e.VKq(2,Bl,o.icons.danger)),e.xp6(1),e.Q6J("ngbPopover",_)}}function Yl(t,n){if(1&t&&(e.TgZ(0,"li",18),e._uU(1),e.ALo(2,"relativeDate"),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.Oqu(e.lcZ(2,1,_.metadataSyncInfo.timestamp))}}function Jl(t,n){if(1&t&&(e.TgZ(0,"span"),e.YNc(1,kl,6,1,"ng-template",null,2,e.W1O),e.TgZ(3,"ul",1),e.YNc(4,zl,2,0,"ng-template",null,3,e.W1O),e.YNc(6,Xl,2,2,"ng-template",null,4,e.W1O),e.TgZ(8,"li")(9,"b"),e._uU(10,"Status:"),e.qZA()(),e.YNc(11,Ql,4,4,"li",5),e.TgZ(12,"li",6),e._uU(13," Last Synced: "),e.qZA(),e.YNc(14,Yl,3,3,"li",7),e.qZA()()),2&t){const _=e.MAs(5),o=e.MAs(7),i=e.oxw();e.xp6(11),e.Q6J("ngIf",(null==i.metadataSyncInfo.syncstatus?null:i.metadataSyncInfo.syncstatus.includes("failed"))||(null==i.metadataSyncInfo.syncstatus?null:i.metadataSyncInfo.syncstatus.includes("error")))("ngIfElse",o),e.xp6(3),e.Q6J("ngIf",i.metadataSyncInfo.timestamp)("ngIfElse",_)}}let Kl=(()=>{class t{constructor(){this.icons=$.P,this.metadataSyncInfo={}}}return t.\u0275fac=function(_){return new(_||t)},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-sync-metadata-info"]],inputs:{metadataSyncInfo:"metadataSyncInfo"},decls:2,vars:2,consts:function(){let n,_,o;return n="" + "\ufffd0\ufffd" + "",_="Not Syncing",o="Error",[[4,"ngIf"],[1,"me-2"],["metadataSyncPopover",""],["upToDateTpl",""],["showMetadataStatus",""],[4,"ngIf","ngIfElse"],[1,"mt-4","fw-bold"],["class","badge badge-info",4,"ngIf","ngIfElse"],[1,"text-center"],[4,"ngFor","ngForOf"],[1,"badge","badge-success"],["class","lead text-primary","placement","top","popoverClass","rgw-overview-card-popover",3,"ngbPopover",4,"ngIf"],["placement","top","popoverClass","rgw-overview-card-popover",1,"lead","text-primary",3,"ngbPopover"],n,_,[1,"text-danger",3,"ngClass"],["placement","top","popoverClass","rgw-overview-card-popover",1,"lead","text-danger",3,"ngbPopover"],o,[1,"badge","badge-info"]]},template:function(_,o){1&_&&(e.YNc(0,Dl,7,0,"span",0),e.YNc(1,Jl,15,4,"span",0)),2&_&&(e.Q6J("ngIf","no sync (zone is master)"===o.metadataSyncInfo),e.xp6(1),e.Q6J("ngIf","no sync (zone is 
master)"!==o.metadataSyncInfo))},dependencies:[T.mk,T.sg,T.O5,G.o8,T.rS,h_.h],styles:["ul[_ngcontent-%COMP%]{align-items:center;display:flex;flex-direction:column;list-style-type:none}"]}),t})();function Vl(t,n){if(1&t&&(e.TgZ(0,"span")(1,"b"),e._uU(2),e.ALo(3,"titlecase"),e.qZA(),e._uU(4),e.ALo(5,"titlecase"),e.qZA()),2&t){const _=e.oxw(2).$implicit;e.xp6(2),e.Oqu(e.lcZ(3,2,_.split(": ")[0])),e.xp6(2),e.hij(":",e.lcZ(5,4,_.split(": ")[1])," ")}}function jl(t,n){if(1&t&&(e.TgZ(0,"span")(1,"b"),e._uU(2),e.ALo(3,"titlecase"),e.qZA()()),2&t){const _=e.oxw(2).$implicit;e.xp6(2),e.Oqu(e.lcZ(3,1,_))}}function er(t,n){if(1&t&&(e.TgZ(0,"span"),e.YNc(1,Vl,6,6,"span",9),e.YNc(2,jl,4,3,"span",9),e.qZA()),2&t){const _=e.oxw().$implicit;e.xp6(1),e.Q6J("ngIf",null==_?null:_.includes(":")),e.xp6(1),e.Q6J("ngIf",!(null!=_&&_.includes(":")))}}function _r(t,n){if(1&t&&(e.TgZ(0,"span"),e._uU(1),e.ALo(2,"titlecase"),e.qZA()),2&t){const _=e.oxw().$implicit;e.xp6(1),e.hij(" ",e.lcZ(2,1,_)," ")}}function tr(t,n){if(1&t&&(e.TgZ(0,"li"),e.YNc(1,er,3,2,"span",9),e.YNc(2,_r,3,3,"span",9),e.qZA()),2&t){const _=n.$implicit,o=e.oxw(2);e.xp6(1),e.Q6J("ngIf",!(null!=_&&_.includes(o.zone.name)||null!=_&&_.includes(o.zone.syncstatus)||null!=_&&_.includes("failed")||null!=_&&_.includes("error"))),e.xp6(1),e.Q6J("ngIf",(null==_?null:_.includes("failed"))||(null==_?null:_.includes("error")))}}function or(t,n){if(1&t&&(e.TgZ(0,"ul",7)(1,"li")(2,"h5")(3,"b"),e._uU(4,"Sync Status:"),e.qZA()()(),e.YNc(5,tr,3,2,"li",8),e.qZA()),2&t){const _=e.oxw();e.xp6(5),e.Q6J("ngForOf",_.zone.fullSyncStatus)}}function nr(t,n){1&t&&(e.TgZ(0,"li",10),e._uU(1,"Up to Date"),e.qZA())}function ir(t,n){if(1&t&&(e.TgZ(0,"a",12),e.SDv(1,13),e.ALo(2,"titlecase"),e.qZA()),2&t){const _=e.oxw(2),o=e.MAs(1);e.Q6J("ngbPopover",o),e.xp6(2),e.pQV(e.lcZ(2,2,_.zone.syncstatus)),e.QtT(1)}}function sr(t,n){if(1&t&&(e.TgZ(0,"a",12),e.SDv(1,14),e.qZA()),2&t){e.oxw(2);const _=e.MAs(1);e.Q6J("ngbPopover",_)}}function ar(t,n){if(1&t&&(e.YNc(0,ir,3,4,"a",11),e.YNc(1,sr,2,1,"a",11)),2&t){const _=e.oxw();e.Q6J("ngIf","Not Syncing From Zone"!==_.zone.syncstatus),e.xp6(1),e.Q6J("ngIf","Not Syncing From Zone"===_.zone.syncstatus)}}const lr=function(t){return[t]};function rr(t,n){if(1&t&&(e.TgZ(0,"li"),e._UZ(1,"i",15),e.TgZ(2,"a",16),e.SDv(3,17),e.qZA()()),2&t){const _=e.oxw(),o=e.MAs(1);e.xp6(1),e.Q6J("ngClass",e.VKq(2,lr,_.icons.danger)),e.xp6(1),e.Q6J("ngbPopover",o)}}function cr(t,n){if(1&t&&(e.TgZ(0,"li",18),e._uU(1),e.ALo(2,"relativeDate"),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Oqu(e.lcZ(2,1,_.zone.timestamp))}}let dr=(()=>{class t{constructor(){this.icons=$.P,this.zone={}}}return t.\u0275fac=function(_){return new(_||t)},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-sync-data-info"]],inputs:{zone:"zone"},decls:14,vars:4,consts:function(){let n,_,o;return n="" + "\ufffd0\ufffd" + "",_="Not Syncing",o="Error",[["syncPopover",""],[1,"me-2"],["upToDateTpl",""],["showStatus",""],[4,"ngIf","ngIfElse"],[1,"mt-4","fw-bold"],["class","badge badge-info",4,"ngIf","ngIfElse"],[1,"text-center"],[4,"ngFor","ngForOf"],[4,"ngIf"],[1,"badge","badge-success"],["class","lead 
text-primary","placement","top","popoverClass","rgw-overview-card-popover",3,"ngbPopover",4,"ngIf"],["placement","top","popoverClass","rgw-overview-card-popover",1,"lead","text-primary",3,"ngbPopover"],n,_,[1,"text-danger",3,"ngClass"],["placement","top","popoverClass","rgw-overview-card-popover",1,"lead","text-danger",3,"ngbPopover"],o,[1,"badge","badge-info"]]},template:function(_,o){if(1&_&&(e.YNc(0,or,6,1,"ng-template",null,0,e.W1O),e.TgZ(2,"ul",1),e.YNc(3,nr,2,0,"ng-template",null,2,e.W1O),e.YNc(5,ar,2,2,"ng-template",null,3,e.W1O),e.TgZ(7,"li")(8,"b"),e._uU(9,"Status:"),e.qZA()(),e.YNc(10,rr,4,4,"li",4),e.TgZ(11,"li",5),e._uU(12," Last Synced: "),e.qZA(),e.YNc(13,cr,3,3,"li",6),e.qZA()),2&_){const i=e.MAs(4),s=e.MAs(6);e.xp6(10),e.Q6J("ngIf",(null==o.zone.syncstatus?null:o.zone.syncstatus.includes("failed"))||(null==o.zone.syncstatus?null:o.zone.syncstatus.includes("error")))("ngIfElse",s),e.xp6(3),e.Q6J("ngIf",o.zone.timestamp)("ngIfElse",i)}},dependencies:[T.mk,T.sg,T.O5,G.o8,T.rS,h_.h],styles:["ul[_ngcontent-%COMP%]{align-items:center;display:flex;flex-direction:column;list-style-type:none}"]}),t})();function ur(t,n){if(1&t&&e._UZ(0,"cd-card-row",25),2&t){const _=e.oxw();e.Q6J("data",_.rgwDaemonCount)}}function gr(t,n){if(1&t&&e._UZ(0,"cd-card-row",26),2&t){const _=e.oxw();e.Q6J("data",_.rgwRealmCount)}}function Rr(t,n){if(1&t&&e._UZ(0,"cd-card-row",27),2&t){const _=e.oxw();e.Q6J("data",_.rgwZonegroupCount)}}function Tr(t,n){if(1&t&&e._UZ(0,"cd-card-row",28),2&t){const _=e.oxw();e.Q6J("data",_.rgwZoneCount)}}function Er(t,n){if(1&t&&e._UZ(0,"cd-card-row",29),2&t){const _=e.oxw();e.Q6J("data",_.rgwBucketCount)}}function fr(t,n){if(1&t&&e._UZ(0,"cd-card-row",30),2&t){const _=e.oxw();e.Q6J("data",_.UserCount)}}function pr(t,n){if(1&t&&e._UZ(0,"cd-card-row",31),2&t){const _=e.oxw();e.Q6J("data",_.objectCount)}}function mr(t,n){1&t&&(e.TgZ(0,"span",32)(1,"cd-alert-panel",33),e.tHW(2,34),e._UZ(3,"cd-doc",35),e.N_p(),e.qZA()())}const __=function(t,n,_){return[t,n,_]};function Mr(t,n){if(1&t&&(e.TgZ(0,"span",36),e._UZ(1,"i",37),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.Q6J("ngClass",e.kEZ(1,__,_.icons.large3x,_.icons.spinner,_.icons.spin))}}function Sr(t,n){if(1&t&&(e.TgZ(0,"span",36),e._UZ(1,"i",37),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.Q6J("ngClass",e.kEZ(1,__,_.icons.large3x,_.icons.spinner,_.icons.spin))}}function Cr(t,n){if(1&t&&(e.TgZ(0,"span",36),e._UZ(1,"cd-rgw-sync-primary-zone",43),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.Q6J("realm",_.realm)("zonegroup",_.zonegroup)("zone",_.zone)}}function Or(t,n){if(1&t&&(e.TgZ(0,"span",36),e._UZ(1,"i",37),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.Q6J("ngClass",e.kEZ(1,__,_.icons.large3x,_.icons.spinner,_.icons.spin))}}function Fr(t,n){if(1&t&&(e.TgZ(0,"span",51),e._UZ(1,"cd-rgw-sync-metadata-info",52),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("metadataSyncInfo",_.metadataSyncInfo)}}function Pr(t,n){if(1&t&&(e.TgZ(0,"span",53),e._UZ(1,"cd-rgw-sync-data-info",54),e.qZA()),2&t){const _=e.oxw(2).$implicit;e.xp6(1),e.Q6J("zone",_)}}const Nr=function(t){return{"border-left":t}};function Gr(t,n){if(1&t&&(e.TgZ(0,"cd-card",48),e.YNc(1,Fr,2,1,"span",49),e.YNc(2,Pr,2,1,"span",50),e.qZA()),2&t){const _=n.$implicit;e.Q6J("cardTitle",_)("ngClass",e.VKq(6,Nr,"Data Sync"===_))("alignItemsCenter",!0)("justifyContentCenter",!0),e.xp6(1),e.Q6J("ngIf","Metadata Sync"===_),e.xp6(1),e.Q6J("ngIf","Data Sync"===_)}}function Ar(t,n){if(1&t&&(e.TgZ(0,"cd-card",45)(1,"div",46),e.YNc(2,Gr,3,8,"cd-card",47),e.qZA()()),2&t){const 
_=n.$implicit,o=e.oxw(4);e.s9C("cardTitle",_.name),e.xp6(2),e.Q6J("ngForOf",o.chartTitles)}}function Ir(t,n){if(1&t&&(e.TgZ(0,"div",1),e.YNc(1,Ar,3,2,"cd-card",44),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.Q6J("ngForOf",_.replicaZonesInfo)("ngForTrackBy",_.trackByFn)}}function br(t,n){if(1&t&&(e.TgZ(0,"div",39)(1,"cd-card",40),e.YNc(2,Sr,2,5,"span",23),e.YNc(3,Cr,2,3,"span",23),e.qZA(),e.TgZ(4,"div",41)(5,"cd-card",42),e.YNc(6,Or,2,5,"span",23),e.YNc(7,Ir,2,2,"div",24),e.qZA()()()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("alignItemsCenter",!0)("justifyContentCenter",!0),e.xp6(1),e.Q6J("ngIf",_.loading),e.xp6(1),e.Q6J("ngIf",!_.loading),e.xp6(3),e.Q6J("ngIf",_.loading),e.xp6(1),e.Q6J("ngIf",!_.loading)}}function hr(t,n){if(1&t&&(e.TgZ(0,"div",1),e.YNc(1,br,8,6,"div",38),e.qZA()),2&t){const _=e.oxw(),o=e.MAs(30);e.xp6(1),e.Q6J("ngIf",_.showMultisiteCard)("ngIfElse",o)}}let Lr=(()=>{class t{constructor(_,o,i,s,l,r,d,u,R,O){this.authStorageService=_,this.healthService=o,this.refreshIntervalService=i,this.rgwDaemonService=s,this.rgwRealmService=l,this.rgwZonegroupService=r,this.rgwZoneService=d,this.rgwBucketService=u,this.prometheusService=R,this.rgwMultisiteService=O,this.icons=$.P,this.interval=new N_.w,this.rgwDaemonCount=0,this.rgwRealmCount=0,this.rgwZonegroupCount=0,this.rgwZoneCount=0,this.rgwBucketCount=0,this.objectCount=0,this.UserCount=0,this.totalPoolUsedBytes=0,this.averageObjectSize=0,this.multisiteInfo=[],this.queriesResults={RGW_REQUEST_PER_SECOND:"",BANDWIDTH:"",AVG_GET_LATENCY:"",AVG_PUT_LATENCY:""},this.chartTitles=["Metadata Sync","Data Sync"],this.replicaZonesInfo=[],this.showMultisiteCard=!0,this.loading=!0,this.subject=new Cl.t,this.syncCardLoading=!0,this.permissions=this.authStorageService.getPermissions()}ngOnInit(){this.interval=this.refreshIntervalService.intervalData$.subscribe(()=>{this.daemonSub=this.rgwDaemonService.list().subscribe(_=>{this.rgwDaemonCount=_.length}),this.HealthSub=this.healthService.getClusterCapacity().subscribe(_=>{this.objectCount=_.total_objects,this.totalPoolUsedBytes=_.total_pool_bytes_used,this.averageObjectSize=_.average_object_size}),this.getSyncStatus()}),this.BucketSub=this.rgwBucketService.getTotalBucketsAndUsersLength().subscribe(_=>{this.rgwBucketCount=_.buckets_count,this.UserCount=_.users_count}),this.realmSub=this.rgwRealmService.list().subscribe(_=>{this.rgwRealmCount=_.realms.length}),this.ZonegroupSub=this.rgwZonegroupService.list().subscribe(_=>{this.rgwZonegroupCount=_.zonegroups.length}),this.ZoneSUb=this.rgwZoneService.list().subscribe(_=>{this.rgwZoneCount=_.zones.length}),this.getPrometheusData(this.prometheusService.lastHourDateObject),this.multisiteSyncStatus$=this.subject.pipe((0,Pl.w)(()=>this.rgwMultisiteService.getSyncStatus().pipe((0,Nl.b)(_=>{this.loading=!1,this.replicaZonesInfo=_.dataSyncInfo,this.metadataSyncInfo=_.metadataSyncInfo,0===this.replicaZonesInfo.length&&(this.showMultisiteCard=!1,this.syncCardLoading=!1,this.loading=!1),[this.realm,this.zonegroup,this.zone]=_.primaryZoneData}),(0,a_.K)(_=>(this.showMultisiteCard=!1,this.syncCardLoading=!1,this.loading=!1,_.preventDefault(),(0,ke.of)(!0))))),(0,Gl.d)(1))}ngOnDestroy(){this.interval.unsubscribe(),this.daemonSub.unsubscribe(),this.realmSub.unsubscribe(),this.ZonegroupSub.unsubscribe(),this.ZoneSUb.unsubscribe(),this.BucketSub.unsubscribe(),this.HealthSub.unsubscribe(),this.prometheusService.unsubscribe()}getPrometheusData(_){this.queriesResults=this.prometheusService.getPrometheusQueriesData(_,Fl.p,this.queriesResults,!0)}getSyncStatus(){this.subject.ne
xt()}trackByFn(_){return _}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ce.j),e.Y36(Al.z),e.Y36(Il.s),e.Y36(oe.b),e.Y36(de.y),e.Y36(ge.K),e.Y36(ue.g),e.Y36(Ee.o),e.Y36(Ol.Q),e.Y36(e_.o))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-rgw-overview-dashboard"]],decls:34,vars:26,consts:function(){let n;return n=" Multi-site needs to be configured in order to see the multi-site sync status. Please consult the " + "\ufffd#3\ufffd" + "" + "\ufffd/#3\ufffd" + " on how to configure and enable the multi-site functionality. ",[[1,"container-fluid"],[1,"row"],["cardTitle","Inventory","aria-label","Inventory card",1,"col-sm-3","px-3","d-flex"],["link","/rgw/daemon","title","Gateway","summaryType","simplified",3,"data",4,"ngIf"],["link","/rgw/multisite","title","Realm","summaryType","simplified",3,"data",4,"ngIf"],["link","/rgw/multisite","title","Zone Group","summaryType","simplified",3,"data",4,"ngIf"],["link","/rgw/multisite","title","Zone","summaryType","simplified",3,"data",4,"ngIf"],["link","/rgw/bucket","title","Bucket","summaryType","simplified",3,"data",4,"ngIf"],["link","/rgw/user","title","User","summaryType","simplified",3,"data",4,"ngIf"],["title","Object","summaryType","simplified",3,"data",4,"ngIf"],["cardTitle","Performance Statistics","ria-label","Performance Statistics card",1,"col-sm-6","d-flex"],[1,"ms-4","me-4","mt-0"],[3,"selectedTime"],["chartTitle","Requests/sec","dataUnits","","label","Requests/sec",3,"data"],["chartTitle","Latency","dataUnits","ms","label","GET","label2","PUT",3,"data","data2"],["chartTitle","Bandwidth","dataUnits","B","label","GET","label2","PUT",3,"data","data2"],[1,"col-lg-3"],["cardTitle","Used Capacity","aria-label","Used Capacity",1,"col-sm-2","d-flex","w-100","h-50","pb-3",3,"alignItemsCenter","justifyContentCenter"],[1,"ms-4","me-4","text-center"],["cardTitle","Average Object Size","aria-label","Avg Object Size",1,"col-sm-2","d-flex","w-100","h-50","pt-3",3,"alignItemsCenter","justifyContentCenter"],[1,"row","pt-4","pb-4"],["cardTitle","Multi-Site Sync Status"],["notConfigured",""],["class","d-flex justify-content-center",4,"ngIf"],["class","row",4,"ngIf"],["link","/rgw/daemon","title","Gateway","summaryType","simplified",3,"data"],["link","/rgw/multisite","title","Realm","summaryType","simplified",3,"data"],["link","/rgw/multisite","title","Zone Group","summaryType","simplified",3,"data"],["link","/rgw/multisite","title","Zone","summaryType","simplified",3,"data"],["link","/rgw/bucket","title","Bucket","summaryType","simplified",3,"data"],["link","/rgw/user","title","User","summaryType","simplified",3,"data"],["title","Object","summaryType","simplified",3,"data"],[1,"pe-5","ps-5"],["type","info"],n,["section","multisite"],[1,"d-flex","justify-content-center"],[3,"ngClass"],["class","row pt-2",4,"ngIf","ngIfElse"],[1,"row","pt-2"],["cardTitle","Primary Source Zone",1,"col-lg-3","d-flex","justify-content-center","align-primary-zone",3,"alignItemsCenter","justifyContentCenter"],[1,"col-lg-9"],["cardTitle","Source Zones",1,"d-flex","h-100"],[3,"realm","zonegroup","zone"],["cardType","zone","shadowClass","true","class","col-sm-9 col-lg-6 align-replica-zones d-flex pt-4","aria-label","Source Zones Card",3,"cardTitle",4,"ngFor","ngForOf","ngForTrackBy"],["cardType","zone","shadowClass","true","aria-label","Source Zones Card",1,"col-sm-9","col-lg-6","align-replica-zones","d-flex","pt-4",3,"cardTitle"],[1,"row","pb-4","ps-3","pe-3"],["cardType","syncCards","removeBorder","true","class","col-sm-9 col-lg-6","aria-label","Charts 
Card",3,"cardTitle","ngClass","alignItemsCenter","justifyContentCenter",4,"ngFor","ngForOf"],["cardType","syncCards","removeBorder","true","aria-label","Charts Card",1,"col-sm-9","col-lg-6",3,"cardTitle","ngClass","alignItemsCenter","justifyContentCenter"],["class","me-2 text-center",4,"ngIf"],["class","me-2",4,"ngIf"],[1,"me-2","text-center"],[3,"metadataSyncInfo"],[1,"me-2"],[3,"zone"]]},template:function(_,o){1&_&&(e.TgZ(0,"div",0)(1,"div",1)(2,"cd-card",2),e.YNc(3,ur,1,1,"cd-card-row",3),e.YNc(4,gr,1,1,"cd-card-row",4),e.YNc(5,Rr,1,1,"cd-card-row",5),e.YNc(6,Tr,1,1,"cd-card-row",6),e.YNc(7,Er,1,1,"cd-card-row",7),e.YNc(8,fr,1,1,"cd-card-row",8),e.YNc(9,pr,1,1,"cd-card-row",9),e.qZA(),e.TgZ(10,"cd-card",10)(11,"div",11)(12,"cd-dashboard-time-selector",12),e.NdJ("selectedTime",function(s){return o.getPrometheusData(s)}),e.qZA(),e._UZ(13,"cd-dashboard-area-chart",13)(14,"cd-dashboard-area-chart",14)(15,"cd-dashboard-area-chart",15),e.qZA()(),e.TgZ(16,"div",16)(17,"cd-card",17)(18,"span",18)(19,"h1"),e._uU(20),e.ALo(21,"dimlessBinary"),e.qZA()()(),e.TgZ(22,"cd-card",19)(23,"span",18)(24,"h1"),e._uU(25),e.ALo(26,"dimlessBinary"),e.qZA()()()()(),e.TgZ(27,"div",20)(28,"cd-card",21),e.YNc(29,mr,4,0,"ng-template",null,22,e.W1O),e.YNc(31,Mr,2,5,"span",23),e.YNc(32,hr,2,2,"div",24),e.ALo(33,"async"),e.qZA()()()),2&_&&(e.xp6(3),e.Q6J("ngIf",null!=o.rgwDaemonCount),e.xp6(1),e.Q6J("ngIf",null!=o.rgwRealmCount),e.xp6(1),e.Q6J("ngIf",null!=o.rgwZonegroupCount),e.xp6(1),e.Q6J("ngIf",null!=o.rgwZoneCount),e.xp6(1),e.Q6J("ngIf",null!=o.rgwBucketCount),e.xp6(1),e.Q6J("ngIf",null!=o.UserCount),e.xp6(1),e.Q6J("ngIf",null!=o.objectCount),e.xp6(4),e.Q6J("data",o.queriesResults.RGW_REQUEST_PER_SECOND),e.xp6(1),e.Q6J("data",o.queriesResults.AVG_GET_LATENCY)("data2",o.queriesResults.AVG_PUT_LATENCY),e.xp6(1),e.Q6J("data",o.queriesResults.GET_BANDWIDTH)("data2",o.queriesResults.PUT_BANDWIDTH),e.xp6(2),e.Q6J("alignItemsCenter",!0)("justifyContentCenter",!0),e.xp6(3),e.Oqu(e.lcZ(21,20,o.totalPoolUsedBytes)),e.xp6(2),e.Q6J("alignItemsCenter",!0)("justifyContentCenter",!0),e.xp6(3),e.Oqu(e.lcZ(26,22,o.averageObjectSize)),e.xp6(6),e.Q6J("ngIf",o.loading),e.xp6(1),e.Q6J("ngIf",e.lcZ(33,24,o.multisiteSyncStatus$)))},dependencies:[T.mk,T.sg,T.O5,Re.G,bl.K,hl.A,Ll.e,Wl.S,$l.M,Zl,Kl,dr,T.Ov,Ye.$],styles:["hr[_ngcontent-%COMP%]{margin-bottom:2px;margin-top:2px}.list-group-item[_ngcontent-%COMP%]{border:0}.align-replica-zones[_ngcontent-%COMP%]{margin-left:auto;margin-right:auto;padding-left:2em;padding-right:2em}ul[_ngcontent-%COMP%]{align-items:center;display:flex;flex-direction:column;list-style-type:none}.align-primary-zone[_ngcontent-%COMP%]{padding-left:4em}.border-left[_ngcontent-%COMP%]{border-left:1px solid rgba(0,0,0,.1254901961)}"]}),t})();var Wr=c(46767);let L_=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[T.ez,et.m,a.u5,a.UX,_t.B,G.Oz,J.Bz,G.HK,G.dT,o_.b,je.xc,Sl.t,Wr.d]}),t})();const 
$r=[{path:"",redirectTo:"rbd",pathMatch:"full"},{path:"daemon",component:Xo,data:{breadcrumbs:"Gateways"}},{path:"user",data:{breadcrumbs:"Users"},children:[{path:"",component:ss},{path:I.MQ.CREATE,component:O_,data:{breadcrumbs:I.Qn.CREATE}},{path:`${I.MQ.EDIT}/:uid`,component:O_,data:{breadcrumbs:I.Qn.EDIT}}]},{path:"roles",data:{breadcrumbs:"Roles",resource:"api.rgw.roles@1.0",tabs:[{name:"Users",url:"/rgw/user"},{name:"Roles",url:"/rgw/roles"}]},children:[{path:"",component:j_.c},{path:I.MQ.CREATE,component:as.U,data:{breadcrumbs:I.Qn.CREATE}}]},{path:"bucket",data:{breadcrumbs:"Buckets"},children:[{path:"",component:ho},{path:I.MQ.CREATE,component:d_,data:{breadcrumbs:I.Qn.CREATE}},{path:`${I.MQ.EDIT}/:bid`,component:d_,data:{breadcrumbs:I.Qn.EDIT}}]},{path:"overview",data:{breadcrumbs:"Overview"},children:[{path:"",component:Lr}]},{path:"multisite",children:[{path:"",component:Te}]}];let Zr=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[L_,J.Bz.forChild($r)]}),t})()}}]); \ No newline at end of file diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/95.1ae8f43a396d3fea.js b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/95.1ae8f43a396d3fea.js deleted file mode 100644 index a22b03bd4..000000000 --- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/95.1ae8f43a396d3fea.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkceph_dashboard=self.webpackChunkceph_dashboard||[]).push([[95],{38095:(Fi,Fe,r)=>{r.r(Fe),r.d(Fe,{PoolModule:()=>qe,RoutedPoolModule:()=>Ai});var C=r(11048),a=r(1659),Oe=r(55860),g=r(71334),M=r(79512),d_=r(44466),u_=r(21043),P_=r(370),f_=r(23815),u=r.n(f_),E_=r(7357),g_=r(26504),ue=r(80842);class T{constructor(){this.nodes=[],this.idTree={},this.allDevices=[],this.buckets=[],this.failureDomains={},this.failureDomainKeys=[],this.devices=[],this.deviceCount=0}static searchFailureDomains(i,_){return this.getFailureDomains(this.search(i,_))}static search(i,_){const[o,n]=_.split("~"),s=i.find(c=>["name","id","type"].some(d=>c[d]===o));return s?(i=this.getSubNodes(s,this.createIdTreeFromNodes(i)),n&&(i=this.filterNodesByDeviceType(i,n)),i):[]}static createIdTreeFromNodes(i){const _={};return i.forEach(o=>{_[o.id]=o}),_}static getSubNodes(i,_){let o=[i];return i.children&&i.children.forEach(n=>{o=o.concat(this.getSubNodes(_[n],_))}),o}static filterNodesByDeviceType(i,_){let n,o=i.filter(c=>c.device_class&&c.device_class!==_).map(c=>c.id),s=o;do{n=!1,i=i.filter(d=>!o.includes(d.id));const c=[];i.forEach(d=>{d.children&&d.children.every(f=>o.includes(f))&&(c.push(d.id),n=!0)}),n&&(o=c,s=s.concat(c))}while(n);return(i=u().cloneDeep(i)).map(c=>(c.children&&(c.children=c.children.filter(d=>!s.includes(d))),c))}static getFailureDomains(i){const _={};return i.forEach(o=>{const n=o.type;_[n]||(_[n]=[]),_[n].push(o)}),_}initCrushNodeSelection(i,_,o,n){this.nodes=i,this.idTree=T.createIdTreeFromNodes(i),i.forEach(s=>{this.idTree[s.id]=s}),this.buckets=u().sortBy(i.filter(s=>s.children),"name"),this.controls={root:_,failure:o,device:n},this.preSelectRoot(),this.controls.root.valueChanges.subscribe(()=>this.onRootChange()),this.controls.failure.valueChanges.subscribe(()=>this.onFailureDomainChange()),this.controls.device.valueChanges.subscribe(()=>this.onDeviceChange())}preSelectRoot(){const i=this.nodes.find(_=>"root"===_.type);this.silentSet(this.controls.root,i),this.onRootChange()}silentSet(i,_){i.setValue(_,{emitEvent:!1})}onRootChange(){const 
i=T.getSubNodes(this.controls.root.value,this.idTree),_=T.getFailureDomains(i);Object.keys(_).forEach(o=>{_[o].length<=1&&delete _[o]}),this.failureDomains=_,this.failureDomainKeys=Object.keys(_).sort(),this.updateFailureDomain()}updateFailureDomain(){let i=this.getIncludedCustomValue(this.controls.failure,Object.keys(this.failureDomains));""===i&&(i=this.setMostCommonDomain(this.controls.failure)),this.updateDevices(i)}getIncludedCustomValue(i,_){return i.dirty&&_.includes(i.value)?i.value:""}setMostCommonDomain(i){let _={n:0,type:""};return Object.keys(this.failureDomains).forEach(o=>{const n=this.failureDomains[o].length;_.nT.getSubNodes(n,this.idTree)));this.allDevices=_.filter(n=>n.device_class).map(n=>n.device_class),this.devices=u().uniq(this.allDevices).sort();const o=1===this.devices.length?this.devices[0]:this.getIncludedCustomValue(this.controls.device,this.devices);this.silentSet(this.controls.device,o),this.onDeviceChange(o)}onDeviceChange(i=this.controls.device.value){this.deviceCount=""===i?this.allDevices.length:this.allDevices.filter(_=>_===i).length}}var Ne=r(30982),p_=r(14745),b=r(65862),R_=r(93614),be=r(95463),E=r(90070),m_=r(30633),v=r(76111),C_=r(47557),M_=r(28211),de=r(32337),e=r(89724),ve=r(62862),Ie=r(83608),$e=r(60312),Pe=r(41582),fe=r(56310),Ee=r(87925),ge=r(94276),pe=r(82945),Re=r(18372),me=r(30839),Ce=r(10545);function h_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,31),e.qZA())}function S_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,32),e.qZA())}function T_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,33),e.qZA())}function L_(t,i){1&t&&(e.TgZ(0,"option",26),e.SDv(1,34),e.qZA())}function A_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function F_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,36),e.qZA())}function N_(t,i){1&t&&(e.TgZ(0,"option",26),e.SDv(1,37),e.qZA())}function b_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function v_(t,i){1&t&&(e.TgZ(0,"span",30),e.SDv(1,38),e.qZA())}function I_(t,i){if(1&t&&(e.TgZ(0,"option",35),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let $_=(()=>{class t extends T{constructor(_,o,n,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=n,this.crushRuleService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.crushRuleService.formTooltips,this.action=this.actionLabels.CREATE,this.resource="Crush Rule",this.createForm()}createForm(){this.form=this.formBuilder.group({name:["",[a.kI.required,a.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],root:null,failure_domain:"",device_class:""})}ngOnInit(){this.crushRuleService.getInfo().subscribe(({names:_,nodes:o})=>{this.initCrushNodeSelection(o,this.form.get("root"),this.form.get("failure_domain"),this.form.get("device_class")),this.names=_})}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=u().cloneDeep(this.form.value);_.root=_.root.name,""===_.device_class&&delete _.device_class,this.taskWrapper.wrapTaskAroundCall({task:new v.R("crushRule/create",_),call:this.crushRuleService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(ve.O),e.Y36(g.Kz),e.Y36(de.P),e.Y36(Ie.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-crush-rule-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:55,vars:27,consts:function(){let i,_,o,n,s,c,d,f,p,R,h,S,m;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Root",n="Failure domain type",s="Device class",c="Let Ceph decide",d="This field is required!",f="The name can only consist of alphanumeric characters, dashes and underscores.",p="The chosen erasure code profile name is already in use.",R="Loading...",h="This field is required!",S="Loading...",m="This field is required!",[[3,"modalRef"],[1,"modal-title"],i,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"required"],[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","root",1,"cd-col-form-label"],o,[3,"html"],["id","root","name","root","formControlName","root",1,"form-select"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","failure_domain",1,"cd-col-form-label"],n,["id","failure_domain","name","failure_domain","formControlName","failure_domain",1,"form-select"],["for","device_class",1,"cd-col-form-label"],s,["id","device_class","name","device_class","formControlName","device_class",1,"form-select"],["ngValue",""],c,[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],d,f,p,R,[3,"ngValue"],h,S,m]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.ynx(11),e.SDv(12,9),e.BQk(),e._UZ(13,"span",10),e.qZA(),e.TgZ(14,"div",11),e._UZ(15,"input",12),e.YNc(16,h_,2,0,"span",13),e.YNc(17,S_,2,0,"span",13),e.YNc(18,T_,2,0,"span",13),e.qZA()(),e.TgZ(19,"div",7)(20,"label",14),e.ynx(21),e.SDv(22,15),e.BQk(),e._UZ(23,"cd-helper",16)(24,"span",10),e.qZA(),e.TgZ(25,"div",11)(26,"select",17),e.YNc(27,L_,2,0,"option",18),e.YNc(28,A_,2,2,"option",19),e.qZA(),e.YNc(29,F_,2,0,"span",13),e.qZA()(),e.TgZ(30,"div",7)(31,"label",20),e.ynx(32),e.SDv(33,21),e.BQk(),e._UZ(34,"cd-helper",16)(35,"span",10),e.qZA(),e.TgZ(36,"div",11)(37,"select",22),e.YNc(38,N_,2,0,"option",18),e.YNc(39,b_,2,3,"option",19),e.qZA(),e.YNc(40,v_,2,0,"span",13),e.qZA()(),e.TgZ(41,"div",7)(42,"label",23),e.ynx(43),e.SDv(44,24),e.BQk(),e._UZ(45,"cd-helper",16),e.qZA(),e.TgZ(46,"div",11)(47,"select",25)(48,"option",26),e.SDv(49,27),e.qZA(),e.YNc(50,I_,2,2,"option",19),e.qZA()()()(),e.TgZ(51,"div",28)(52,"cd-form-button-panel",29),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(53,"titlecase"),e.ALo(54,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const 
n=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,19,o.action))(e.lcZ(4,21,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(10),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.root),e.xp6(4),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(1),e.Q6J("ngIf",o.form.showError("root",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.failure_domain),e.xp6(4),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.form.showError("failure_domain",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.device_class),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(53,23,o.action)+" "+e.lcZ(54,25,o.resource))}},directives:[$e.z,a._Y,a.JL,Pe.V,a.sg,fe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,Re.S,a.EJ,a.YN,a.Kr,C.sg,me.p],pipes:[C.rS,Ce.m],styles:[""]}),t})();class D_{}var x_=r(19725);let Me=(()=>{class t{constructor(_){this.http=_,this.apiPath="api/erasure_code_profile",this.formTooltips={k:"Each object is split in data-chunks parts, each stored on a different OSD.",m:"Compute coding chunks for each object and store them on different OSDs.\n The number of coding chunks is also the number of OSDs that can be down without losing data.",plugins:{jerasure:{description:"The jerasure plugin is the most generic and flexible plugin,\n it is also the default for Ceph erasure coded pools.",technique:"The more flexible technique is reed_sol_van : it is enough to set k\n and m. The cauchy_good technique can be faster but you need to chose the packetsize\n carefully. All of reed_sol_r6_op, liberation, blaum_roth, liber8tion are RAID6 equivalents\n in the sense that they can only be configured with m=2.",packetSize:"The encoding will be done on packets of bytes size at a time.\n Choosing the right packet size is difficult.\n The jerasure documentation contains extensive information on this topic."},lrc:{description:"With the jerasure plugin, when an erasure coded object is stored on\n multiple OSDs, recovering from the loss of one OSD requires reading from all the others.\n For instance if jerasure is configured with k=8 and m=4, losing one OSD requires reading\n from the eleven others to repair.\n\n The lrc erasure code plugin creates local parity chunks to be able to recover using\n less OSDs. For instance if lrc is configured with k=8, m=4 and l=4, it will create\n an additional parity chunk for every four OSDs. When a single OSD is lost, it can be\n recovered with only four OSDs instead of eleven.",l:"Group the coding and data chunks into sets of size locality. For instance,\n for k=4 and m=2, when locality=3 two groups of three are created. Each set can\n be recovered without reading chunks from another set.",crushLocality:"The type of the crush bucket in which each set of chunks defined\n by l will be stored. For instance, if it is set to rack, each group of l chunks will be\n placed in a different rack. It is used to create a CRUSH rule step such as step choose\n rack. If it is not set, no such grouping is done."},isa:{description:"The isa plugin encapsulates the ISA library. 
It only runs on Intel processors.",technique:"The ISA plugin comes in two Reed Solomon forms.\n If reed_sol_van is set, it is Vandermonde, if cauchy is set, it is Cauchy."},shec:{description:"The shec plugin encapsulates the multiple SHEC library.\n It allows ceph to recover data more efficiently than Reed Solomon codes.",c:"The number of parity chunks each of which includes each data chunk in its\n calculation range. The number is used as a durability estimator. For instance, if c=2,\n 2 OSDs can be down without losing data."},clay:{description:"CLAY (short for coupled-layer) codes are erasure codes designed to\n bring about significant savings in terms of network bandwidth and disk IO when a failed\n node/OSD/rack is being repaired.",d:"Number of OSDs requested to send data during recovery of a single chunk.\n d needs to be chosen such that k+1 <= d <= k+m-1. The larger the d, the better\n the savings.",scalar_mds:"scalar_mds specifies the plugin that is used as a building block\n in the layered construction. It can be one of jerasure, isa, shec.",technique:"technique specifies the technique that will be picked\n within the 'scalar_mds' plugin specified. Supported techniques\n are 'reed_sol_van', 'reed_sol_r6_op', 'cauchy_orig',\n 'cauchy_good', 'liber8tion' for jerasure, 'reed_sol_van',\n 'cauchy' for isa and 'single', 'multiple' for shec."}},crushRoot:"The name of the crush bucket used for the first step of the CRUSH rule.\n For instance step take default.",crushFailureDomain:"Ensure that no two chunks are in a bucket with the same failure\n domain. For instance, if the failure domain is host no two chunks will be stored on the same\n host. It is used to create a CRUSH rule step such as step chooseleaf host.",crushDeviceClass:"Restrict placement to devices of a specific class\n (e.g., ssd or hdd), using the crush device class names in the CRUSH map.",directory:"Set the directory name from which the erasure code plugin is loaded."}}list(){return this.http.get(this.apiPath)}create(_){return this.http.post(this.apiPath,_,{observe:"response"})}delete(_){return this.http.delete(`${this.apiPath}/${_}`,{observe:"response"})}getInfo(){return this.http.get(`ui-${this.apiPath}/info`)}}return t.\u0275fac=function(_){return new(_||t)(e.LFG(x_.eN))},t.\u0275prov=e.Yz7({token:t,factory:t.\u0275fac,providedIn:"root"}),t})();function y_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,47),e.qZA())}function Z_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,48),e.qZA())}function U_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,49),e.qZA())}function H_(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,50),e.qZA())}function G_(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function z_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,52),e.qZA())}function q_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,53),e.qZA())}function X_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,54),e.qZA())}function w_(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,55),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function Q_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,56),e.qZA())}function J_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,57),e.qZA())}function k_(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,58),e.qZA()),2&t){const _=e.oxw();e.xp6(1),e.pQV(_.lrcMultiK),e.QtT(1)}}function V_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,59),e.qZA())}function Y_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,60),e.qZA())}function B_(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,61),e.qZA()),2&t){const 
_=e.oxw();e.xp6(1),e.pQV(_.deviceCount),e.QtT(1)}}function j_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,65),e.qZA())}function K_(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,66),e.qZA())}function W_(t,i){if(1&t&&(e.TgZ(0,"div",7)(1,"label",62)(2,"span",14),e.SDv(3,63),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",64),e.YNc(7,j_,2,0,"span",12),e.YNc(8,K_,2,0,"span",12),e.qZA()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.shec.c),e.xp6(3),e.Q6J("ngIf",_.form.showError("c",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("c",o,"cGreaterM"))}}function eo(t,i){1&t&&(e.TgZ(0,"span",39),e.SDv(1,74),e.qZA())}function _o(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,75),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMin())(_.getDMax()),e.QtT(1)}}function oo(t,i){if(1&t&&(e.TgZ(0,"span",39),e.SDv(1,76),e.qZA()),2&t){const _=e.oxw(3);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function to(t,i){if(1&t&&(e.ynx(0),e.YNc(1,_o,2,2,"span",23),e.YNc(2,oo,2,1,"span",23),e.BQk()),2&t){const _=e.oxw(2);e.xp6(1),e.Q6J("ngIf",_.getDMin()<_.getDMax()),e.xp6(1),e.Q6J("ngIf",_.getDMin()===_.getDMax())}}function io(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,77),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMin()),e.QtT(1)}}function no(t,i){if(1&t&&(e.TgZ(0,"span",46),e.SDv(1,78),e.qZA()),2&t){const _=e.oxw(2);e.xp6(1),e.pQV(_.getDMax()),e.QtT(1)}}function so(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",7)(1,"label",67)(2,"span",14),e.SDv(3,68),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"div",69),e._UZ(7,"input",70),e.TgZ(8,"button",71),e.NdJ("click",function(){return e.CHM(_),e.oxw().toggleDCalc()}),e._UZ(9,"i",72),e.qZA()(),e.YNc(10,eo,2,0,"span",23),e.YNc(11,to,3,2,"ng-container",73),e.YNc(12,io,2,1,"span",12),e.YNc(13,no,2,1,"span",12),e.qZA()()}if(2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.d),e.xp6(5),e.Q6J("ngClass",_.dCalc?_.icons.unlock:_.icons.lock),e.xp6(1),e.Q6J("ngIf",_.dCalc),e.xp6(1),e.Q6J("ngIf",!_.dCalc),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMin")),e.xp6(1),e.Q6J("ngIf",_.form.showError("d",o,"dMax"))}}function ao(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,83),e.qZA())}function lo(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,84),e.qZA())}function ro(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,85),e.qZA())}function co(t,i){if(1&t&&(e.TgZ(0,"div",7)(1,"label",79)(2,"span",14),e.SDv(3,80),e.qZA(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",81),e.YNc(7,ao,2,0,"span",12),e.YNc(8,lo,2,0,"span",12),e.YNc(9,ro,2,0,"span",12),e.TgZ(10,"span",39),e.SDv(11,82),e.qZA()()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.l),e.xp6(3),e.Q6J("ngIf",_.form.showError("l",o,"required")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"min")),e.xp6(1),e.Q6J("ngIf",_.form.showError("l",o,"unequal")),e.xp6(2),e.pQV(_.lrcGroups),e.QtT(11)}}function Oo(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,86),e.qZA())}function uo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw();e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function Po(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,90),e.qZA())}function fo(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,91),e.qZA())}function Eo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw(2);e.Q6J("ngValue",_),e.xp6(1),e.AsE(" ",_," ( ",o.failureDomains[_].length," ) ")}}function 
go(t,i){if(1&t&&(e.TgZ(0,"div",7)(1,"label",87),e.ynx(2),e.SDv(3,88),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"select",89),e.YNc(7,Po,2,0,"option",18),e.YNc(8,fo,2,0,"option",18),e.YNc(9,Eo,2,3,"option",19),e.qZA()()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.lrc.crushLocality),e.xp6(3),e.Q6J("ngIf",!_.failureDomains),e.xp6(1),e.Q6J("ngIf",_.failureDomainKeys.length>0),e.xp6(1),e.Q6J("ngForOf",_.failureDomainKeys)}}function po(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}const De=function(t,i,_){return[t,i,_]};function Ro(t,i){if(1&t&&(e.TgZ(0,"div",7)(1,"label",92),e.ynx(2),e.SDv(3,93),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"select",94),e.YNc(7,po,2,2,"option",19),e.qZA()()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins.clay.scalar_mds),e.xp6(3),e.Q6J("ngForOf",e.kEZ(2,De,_.PLUGIN.JERASURE,_.PLUGIN.ISA,_.PLUGIN.SHEC))}}function mo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}function Co(t,i){if(1&t&&(e.TgZ(0,"div",7)(1,"label",95),e.ynx(2),e.SDv(3,96),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10)(6,"select",97),e.YNc(7,mo,2,2,"option",19),e.qZA()()()),2&t){const _=e.oxw();e.xp6(4),e.Q6J("html",_.tooltips.plugins[_.plugin].technique),e.xp6(3),e.Q6J("ngForOf",_.techniques)}}function Mo(t,i){1&t&&(e.TgZ(0,"span",46),e.SDv(1,101),e.qZA())}function ho(t,i){if(1&t&&(e.TgZ(0,"div",7)(1,"label",98),e.ynx(2),e.SDv(3,99),e.BQk(),e._UZ(4,"cd-helper",16),e.qZA(),e.TgZ(5,"div",10),e._UZ(6,"input",100),e.YNc(7,Mo,2,0,"span",12),e.qZA()()),2&t){const _=e.oxw(),o=e.MAs(7);e.xp6(4),e.Q6J("html",_.tooltips.plugins.jerasure.packetSize),e.xp6(3),e.Q6J("ngIf",_.form.showError("packetSize",o,"min"))}}function So(t,i){1&t&&(e.TgZ(0,"option",37),e.SDv(1,102),e.qZA())}function To(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}function Lo(t,i){if(1&t&&(e.TgZ(0,"option",51),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_," ")}}let Ao=(()=>{class t extends T{constructor(_,o,n,s,c){super(),this.formBuilder=_,this.activeModal=o,this.taskWrapper=n,this.ecpService=s,this.actionLabels=c,this.submitAction=new e.vpe,this.tooltips=this.ecpService.formTooltips,this.PLUGIN={LRC:"lrc",SHEC:"shec",CLAY:"clay",JERASURE:"jerasure",ISA:"isa"},this.plugin=this.PLUGIN.JERASURE,this.icons=b.P,this.action=this.actionLabels.CREATE,this.resource="EC 
Profile",this.createForm(),this.setJerasureDefaults()}createForm(){this.form=this.formBuilder.group({name:[null,[a.kI.required,a.kI.pattern("[A-Za-z0-9_-]+"),E.h.custom("uniqueName",_=>this.names&&-1!==this.names.indexOf(_))]],plugin:[this.PLUGIN.JERASURE,[a.kI.required]],k:[4,[a.kI.required,E.h.custom("max",()=>this.baseValueValidation(!0)),E.h.custom("unequal",_=>this.lrcDataValidation(_)),E.h.custom("kLowerM",_=>this.shecDataValidation(_))]],m:[2,[a.kI.required,E.h.custom("max",()=>this.baseValueValidation())]],crushFailureDomain:"",crushRoot:null,crushDeviceClass:"",directory:"",technique:"reed_sol_van",packetSize:[2048],l:[3,[a.kI.required,E.h.custom("unequal",_=>this.lrcLocalityValidation(_))]],crushLocality:"",c:[2,[a.kI.required,E.h.custom("cGreaterM",_=>this.shecDurabilityValidation(_))]],d:[5,[a.kI.required,E.h.custom("dMin",_=>this.dMinValidation(_)),E.h.custom("dMax",_=>this.dMaxValidation(_))]],scalar_mds:[this.PLUGIN.JERASURE,[a.kI.required]]}),this.toggleDCalc(),this.form.get("k").valueChanges.subscribe(()=>this.updateValidityOnChange(["m","l","d"])),this.form.get("m").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","l","c","d"])),this.form.get("l").valueChanges.subscribe(()=>this.updateValidityOnChange(["k","m"])),this.form.get("plugin").valueChanges.subscribe(_=>this.onPluginChange(_)),this.form.get("scalar_mds").valueChanges.subscribe(()=>this.setClayDefaultsForScalar())}baseValueValidation(_=!1){return this.validValidation(()=>this.getKMSum()>this.deviceCount&&this.form.getValue("k")>this.form.getValue("m")===_)}validValidation(_,o){return!((!this.form||o)&&this.plugin!==o)&&_()}getKMSum(){return this.form.getValue("k")+this.form.getValue("m")}lrcDataValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m"),n=this.form.getValue("l"),s=_+o;return this.lrcMultiK=_/(s/n),_%(s/n)!=0},"lrc")}shecDataValidation(_){return this.validValidation(()=>this.form.getValue("m")>_,"shec")}lrcLocalityValidation(_){return this.validValidation(()=>{const o=this.getKMSum();return this.lrcGroups=_>0?o/_:0,_>0&&o%_!=0},"lrc")}shecDurabilityValidation(_){return this.validValidation(()=>{const o=this.form.getValue("m");return _>o},"shec")}dMinValidation(_){return this.validValidation(()=>this.getDMin()>_,"clay")}getDMin(){return this.form.getValue("k")+1}dMaxValidation(_){return this.validValidation(()=>_>this.getDMax(),"clay")}getDMax(){const _=this.form.getValue("m");return 
this.form.getValue("k")+_-1}toggleDCalc(){this.dCalc=!this.dCalc,this.form.get("d")[this.dCalc?"disable":"enable"](),this.calculateD()}calculateD(){this.plugin!==this.PLUGIN.CLAY||!this.dCalc||this.form.silentSet("d",this.getDMax())}updateValidityOnChange(_){_.forEach(o=>{"d"===o&&this.calculateD(),this.form.get(o).updateValueAndValidity({emitEvent:!1})})}onPluginChange(_){this.plugin=_,_===this.PLUGIN.JERASURE?this.setJerasureDefaults():_===this.PLUGIN.LRC?this.setLrcDefaults():_===this.PLUGIN.ISA?this.setIsaDefaults():_===this.PLUGIN.SHEC?this.setShecDefaults():_===this.PLUGIN.CLAY&&this.setClayDefaults(),this.updateValidityOnChange(["m"])}setJerasureDefaults(){this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liberation","blaum_roth","liber8tion"],this.setDefaults({k:4,m:2,technique:"reed_sol_van"})}setLrcDefaults(){this.setDefaults({k:4,m:2,l:3})}setIsaDefaults(){this.techniques=["reed_sol_van","cauchy"],this.setDefaults({k:7,m:3,technique:"reed_sol_van"})}setShecDefaults(){this.setDefaults({k:4,m:3,c:2})}setClayDefaults(){this.setDefaults({k:4,m:2,scalar_mds:this.PLUGIN.JERASURE}),this.setClayDefaultsForScalar()}setClayDefaultsForScalar(){const _=this.form.getValue("scalar_mds");let o="reed_sol_van";_===this.PLUGIN.JERASURE?this.techniques=["reed_sol_van","reed_sol_r6_op","cauchy_orig","cauchy_good","liber8tion"]:_===this.PLUGIN.ISA?this.techniques=["reed_sol_van","cauchy"]:(o="single",this.techniques=["single","multiple"]),this.setDefaults({technique:o})}setDefaults(_){Object.keys(_).forEach(o=>{const n=this.form.get(o),s=n.value;n.pristine||"technique"===o&&!this.techniques.includes(s)||"k"===o&&[4,7].includes(s)||"m"===o&&[2,3].includes(s)?n.setValue(_[o]):n.updateValueAndValidity()})}ngOnInit(){this.ecpService.getInfo().subscribe(({plugins:_,names:o,directory:n,nodes:s})=>{this.initCrushNodeSelection(s,this.form.get("crushRoot"),this.form.get("crushFailureDomain"),this.form.get("crushDeviceClass")),this.plugins=_,this.names=o,this.form.silentSet("directory",n),this.preValidateNumericInputFields()})}preValidateNumericInputFields(){const _=["k","m","l","c","d"].map(o=>this.form.get(o));_.forEach(o=>{o.markAsTouched(),o.markAsDirty()}),_[1].updateValueAndValidity()}onSubmit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _=this.createJson();this.taskWrapper.wrapTaskAroundCall({task:new v.R("ecp/create",{name:_.name}),call:this.ecpService.create(_)}).subscribe({error:()=>{this.form.setErrors({cdSubmitButton:!0})},complete:()=>{this.activeModal.close(),this.submitAction.emit(_)}})}createJson(){const _={technique:[this.PLUGIN.ISA,this.PLUGIN.JERASURE,this.PLUGIN.CLAY],packetSize:[this.PLUGIN.JERASURE],l:[this.PLUGIN.LRC],crushLocality:[this.PLUGIN.LRC],c:[this.PLUGIN.SHEC],d:[this.PLUGIN.CLAY],scalar_mds:[this.PLUGIN.CLAY]},o=new D_,n=this.form.getValue("plugin");return Object.keys(this.form.controls).filter(s=>{const c=_[s],d=this.form.getValue(s);return(c&&c.includes(n)||!c)&&d&&""!==d}).forEach(s=>{this.extendJson(s,o)}),o}extendJson(_,o){const s=this.form.getValue(_);o[{crushFailureDomain:"crush-failure-domain",crushRoot:"crush-root",crushDeviceClass:"crush-device-class",packetSize:"packetsize",crushLocality:"crush-locality"}[_]||_]="crushRoot"===_?s.name:s}}return t.\u0275fac=function(_){return 
new(_||t)(e.Y36(ve.O),e.Y36(g.Kz),e.Y36(de.P),e.Y36(Me),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-erasure-code-profile-form-modal"]],outputs:{submitAction:"submitAction"},features:[e.qOj],decls:98,vars:53,consts:function(){let i,_,o,n,s,c,d,f,p,R,h,S,m,P,A,I,$,D,x,y,Z,U,H,G,z,q,X,w,Q,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ie,ne,se,ae,le,re,ce;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Plugin",n="Data chunks (k)",s="Coding chunks (m)",c="Crush failure domain",d="Crush root",f="Crush device class",p="Let Ceph decide",R="Available OSDs: " + "\ufffd0\ufffd" + "",h="Directory",S="This field is required!",m="The name can only consist of alphanumeric characters, dashes and underscores.",P="The chosen erasure code profile name is already in use.",A="Loading...",I="This field is required!",$="This field is required!",D="Must be equal to or greater than 2.",x="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",y="For an equal distribution k has to be a multiple of (k+m)/l.",Z="K has to be equal to or greater than m in order to recover data correctly through c.",U="Distribution factor: " + "\ufffd0\ufffd" + "",H="This field is required!",G="Must be equal to or greater than 1.",z="Chunks (k+m) have exceeded the available OSDs of " + "\ufffd0\ufffd" + ".",q="Durability estimator (c)",X="Must be equal to or greater than 1.",w="C has to be equal to or lower than m as m defines the amount of chunks that can be used.",Q="Helper chunks (d)",J="Set d manually or use the plugin's default calculation that maximizes d.",k="D is automatically updated on k and m changes",V="D can be set from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + "",Y="D can only be set to " + "\ufffd0\ufffd" + "",B="D has to be greater than k (" + "\ufffd0\ufffd" + ").",j="D has to be lower than k + m (" + "\ufffd0\ufffd" + ").",K="Locality (l)",N="Locality groups: " + "\ufffd0\ufffd" + "",W="This field is required!",ee="Must be equal to or greater than 1.",_e="Can't split up chunks (k+m) correctly with the current locality.",oe="Loading...",te="Crush Locality",ie="Loading...",ne="None",se="Scalar mds",ae="Technique",le="Packetsize",re="Must be equal to or greater than 1.",ce="Loading...",[[3,"modalRef"],[1,"modal-title"],i,[1,"modal-content"],["novalidate","",3,"formGroup"],["frm","ngForm"],[1,"modal-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label"],_,[1,"cd-col-form-input"],["type","text","id","name","name","name","placeholder","Name...","formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","plugin",1,"cd-col-form-label"],[1,"required"],o,[3,"html"],["id","plugin","name","plugin","formControlName","plugin",1,"form-select"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["for","k",1,"cd-col-form-label"],n,["type","number","id","k","name","k","ng-model","$ctrl.erasureCodeProfile.k","placeholder","Data chunks...","formControlName","k","min","2",1,"form-control"],["class","form-text text-muted",4,"ngIf"],["for","m",1,"cd-col-form-label"],s,["type","number","id","m","name","m","placeholder","Coding chunks...","formControlName","m","min","1",1,"form-control"],["class","form-group 
row",4,"ngIf"],["for","crushFailureDomain",1,"cd-col-form-label"],c,["id","crushFailureDomain","name","crushFailureDomain","formControlName","crushFailureDomain",1,"form-select"],["for","crushRoot",1,"cd-col-form-label"],d,["id","crushRoot","name","crushRoot","formControlName","crushRoot",1,"form-select"],["for","crushDeviceClass",1,"cd-col-form-label"],f,["id","crushDeviceClass","name","crushDeviceClass","formControlName","crushDeviceClass",1,"form-select"],["ngValue",""],p,[1,"form-text","text-muted"],R,["for","directory",1,"cd-col-form-label"],h,["type","text","id","directory","name","directory","placeholder","Path...","formControlName","directory",1,"form-control"],[1,"modal-footer"],[3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],S,m,P,A,[3,"ngValue"],I,$,D,x,y,Z,U,H,G,z,["for","c",1,"cd-col-form-label"],q,["type","number","id","c","name","c","placeholder","Coding chunks...","formControlName","c","min","1",1,"form-control"],X,w,["for","d",1,"cd-col-form-label"],Q,[1,"input-group"],["type","number","id","d","name","d","placeholder","Helper chunks...","formControlName","d",1,"form-control"],["id","d-calc-btn","ngbTooltip",J,"type","button",1,"btn","btn-light",3,"click"],["aria-hidden","true",3,"ngClass"],[4,"ngIf"],k,V,Y,B,j,["for","l",1,"cd-col-form-label"],K,["type","number","id","l","name","l","placeholder","Coding chunks...","formControlName","l","min","1",1,"form-control"],N,W,ee,_e,oe,["for","crushLocality",1,"cd-col-form-label"],te,["id","crushLocality","name","crushLocality","formControlName","crushLocality",1,"form-select"],ie,ne,["for","scalar_mds",1,"cd-col-form-label"],se,["id","scalar_mds","name","scalar_mds","formControlName","scalar_mds",1,"form-select"],["for","technique",1,"cd-col-form-label"],ae,["id","technique","name","technique","formControlName","technique",1,"form-select"],["for","packetSize",1,"cd-col-form-label"],le,["type","number","id","packetSize","name","packetSize","placeholder","Packetsize...","formControlName","packetSize","min","1",1,"form-control"],re,ce]},template:function(_,o){if(1&_&&(e.TgZ(0,"cd-modal",0),e.ynx(1,1),e.SDv(2,2),e.ALo(3,"titlecase"),e.ALo(4,"upperFirst"),e.BQk(),e.ynx(5,3),e.TgZ(6,"form",4,5)(8,"div",6)(9,"div",7)(10,"label",8),e.SDv(11,9),e.qZA(),e.TgZ(12,"div",10),e._UZ(13,"input",11),e.YNc(14,y_,2,0,"span",12),e.YNc(15,Z_,2,0,"span",12),e.YNc(16,U_,2,0,"span",12),e.qZA()(),e.TgZ(17,"div",7)(18,"label",13)(19,"span",14),e.SDv(20,15),e.qZA(),e._UZ(21,"cd-helper",16),e.qZA(),e.TgZ(22,"div",10)(23,"select",17),e.YNc(24,H_,2,0,"option",18),e.YNc(25,G_,2,2,"option",19),e.qZA(),e.YNc(26,z_,2,0,"span",12),e.qZA()(),e.TgZ(27,"div",7)(28,"label",20)(29,"span",14),e.SDv(30,21),e.qZA(),e._UZ(31,"cd-helper",16),e.qZA(),e.TgZ(32,"div",10),e._UZ(33,"input",22),e.YNc(34,q_,2,0,"span",12),e.YNc(35,X_,2,0,"span",12),e.YNc(36,w_,2,1,"span",12),e.YNc(37,Q_,2,0,"span",12),e.YNc(38,J_,2,0,"span",12),e.YNc(39,k_,2,1,"span",23),e.qZA()(),e.TgZ(40,"div",7)(41,"label",24)(42,"span",14),e.SDv(43,25),e.qZA(),e._UZ(44,"cd-helper",16),e.qZA(),e.TgZ(45,"div",10),e._UZ(46,"input",26),e.YNc(47,V_,2,0,"span",12),e.YNc(48,Y_,2,0,"span",12),e.YNc(49,B_,2,1,"span",12),e.qZA()(),e.YNc(50,W_,9,3,"div",27),e.YNc(51,so,14,6,"div",27),e.YNc(52,co,12,5,"div",27),e.TgZ(53,"div",7)(54,"label",28),e.ynx(55),e.SDv(56,29),e.BQk(),e._UZ(57,"cd-helper",16),e.qZA(),e.TgZ(58,"div",10)(59,"select",30),e.YNc(60,Oo,2,0,"option",18),e.YNc(61,uo,2,3,"option",19),e.qZA()()(),e.YNc(62,go,10,4,"div",27),e.YNc(63,Ro,8,6,"div",27),e.YNc(64,Co,8,2,"div",27),e.YNc(65,ho,8,2,"d
iv",27),e.TgZ(66,"div",7)(67,"label",31),e.ynx(68),e.SDv(69,32),e.BQk(),e._UZ(70,"cd-helper",16),e.qZA(),e.TgZ(71,"div",10)(72,"select",33),e.YNc(73,So,2,0,"option",18),e.YNc(74,To,2,2,"option",19),e.qZA()()(),e.TgZ(75,"div",7)(76,"label",34),e.ynx(77),e.SDv(78,35),e.BQk(),e._UZ(79,"cd-helper",16),e.qZA(),e.TgZ(80,"div",10)(81,"select",36)(82,"option",37),e.SDv(83,38),e.qZA(),e.YNc(84,Lo,2,2,"option",19),e.qZA(),e.TgZ(85,"span",39),e.SDv(86,40),e.qZA()()(),e.TgZ(87,"div",7)(88,"label",41),e.ynx(89),e.SDv(90,42),e.BQk(),e._UZ(91,"cd-helper",16),e.qZA(),e.TgZ(92,"div",10),e._UZ(93,"input",43),e.qZA()()(),e.TgZ(94,"div",44)(95,"cd-form-button-panel",45),e.NdJ("submitActionEvent",function(){return o.onSubmit()}),e.ALo(96,"titlecase"),e.ALo(97,"upperFirst"),e.qZA()()(),e.BQk(),e.qZA()),2&_){const n=e.MAs(7);e.Q6J("modalRef",o.activeModal),e.xp6(4),e.pQV(e.lcZ(3,41,o.action))(e.lcZ(4,43,o.resource)),e.QtT(2),e.xp6(2),e.Q6J("formGroup",o.form),e.xp6(8),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"pattern")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"uniqueName")),e.xp6(5),e.Q6J("html",o.tooltips.plugins[o.plugin].description),e.xp6(3),e.Q6J("ngIf",!o.plugins),e.xp6(1),e.Q6J("ngForOf",o.plugins),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",n,"required")),e.xp6(5),e.Q6J("html",o.tooltips.k),e.xp6(3),e.Q6J("ngIf",o.form.showError("k",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"max")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"unequal")),e.xp6(1),e.Q6J("ngIf",o.form.showError("k",n,"kLowerM")),e.xp6(1),e.Q6J("ngIf","lrc"===o.plugin),e.xp6(5),e.Q6J("html",o.tooltips.m),e.xp6(3),e.Q6J("ngIf",o.form.showError("m",n,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",n,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("m",n,"max")),e.xp6(1),e.Q6J("ngIf","shec"===o.plugin),e.xp6(1),e.Q6J("ngIf","clay"===o.plugin),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(5),e.Q6J("html",o.tooltips.crushFailureDomain),e.xp6(3),e.Q6J("ngIf",!o.failureDomains),e.xp6(1),e.Q6J("ngForOf",o.failureDomainKeys),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.LRC),e.xp6(1),e.Q6J("ngIf",o.PLUGIN.CLAY===o.plugin),e.xp6(1),e.Q6J("ngIf",e.kEZ(49,De,o.PLUGIN.JERASURE,o.PLUGIN.ISA,o.PLUGIN.CLAY).includes(o.plugin)),e.xp6(1),e.Q6J("ngIf",o.plugin===o.PLUGIN.JERASURE),e.xp6(5),e.Q6J("html",o.tooltips.crushRoot),e.xp6(3),e.Q6J("ngIf",!o.buckets),e.xp6(1),e.Q6J("ngForOf",o.buckets),e.xp6(5),e.Q6J("html",o.tooltips.crushDeviceClass),e.xp6(5),e.Q6J("ngForOf",o.devices),e.xp6(2),e.pQV(o.deviceCount),e.QtT(86),e.xp6(5),e.Q6J("html",o.tooltips.directory),e.xp6(4),e.Q6J("form",o.form)("submitText",e.lcZ(96,45,o.action)+" "+e.lcZ(97,47,o.resource))}},directives:[$e.z,a._Y,a.JL,Pe.V,a.sg,fe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,Re.S,a.EJ,a.YN,a.Kr,C.sg,a.wV,a.qQ,g._L,C.mk,me.p],pipes:[C.rS,Ce.m],styles:[""]}),t})();var Fo=r(7022);class No{constructor(){this.erasureInfo=!1,this.crushInfo=!1,this.pgs=1,this.poolTypes=["erasure","replicated"],this.applications={selected:[],default:["cephfs","rbd","rgw"],available:[],validators:[a.kI.pattern("[A-Za-z0-9_]+"),a.kI.maxLength(128)],messages:new Fo.a({empty:"No applications added",selectionLimit:{text:"Applications limit reached",tooltip:"A pool can only have up to four applications definitions."},customValidations:{pattern:"Allowed characters '_a-zA-Z0-9'",maxlength:"Maximum length is 128 characters"},filter:"Filter or add applications'",add:"Add application"})}}}var 
xe=r(63285),he=r(47640),bo=r(63622),vo=r(30490),Io=r(60192),ye=r(61350),$o=r(17932),Do=r(60950);const xo=["crushInfoTabs"],yo=["crushDeletionBtn"],Zo=["ecpInfoTabs"],Uo=["ecpDeletionBtn"];function Ho(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,42),e.qZA())}function Go(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,43),e.qZA())}function zo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,44),e.qZA())}function qo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,45),e.qZA())}function Xo(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function wo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,47),e.qZA())}function Qo(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Jo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,58),e.qZA())}function ko(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,59),e.qZA())}function Vo(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,60),e.qZA())}function Yo(t,i){1&t&&(e.TgZ(0,"span",55),e.SDv(1,61),e.qZA())}function Bo(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",8)(1,"label",52),e.SDv(2,53),e.qZA(),e.TgZ(3,"div",11)(4,"input",54),e.NdJ("focus",function(){return e.CHM(_),e.oxw(3).externalPgChange=!1})("blur",function(){return e.CHM(_),e.oxw(3).alignPgs()}),e.qZA(),e.YNc(5,Jo,2,0,"span",13),e.YNc(6,ko,2,0,"span",13),e.YNc(7,Vo,2,0,"span",13),e.TgZ(8,"span",55),e._UZ(9,"cd-doc",56),e.qZA(),e.YNc(10,Yo,2,0,"span",57),e.qZA()()}if(2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(5),e.Q6J("ngIf",o.form.showError("pgNum",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("pgNum",_,"34")),e.xp6(3),e.Q6J("ngIf",o.externalPgChange)}}function jo(t,i){if(1&t&&(e.TgZ(0,"span",41)(1,"ul",66)(2,"li"),e.SDv(3,67),e.qZA(),e.TgZ(4,"li"),e.SDv(5,68),e.qZA()()()),2&t){const _=e.oxw(4);e.xp6(3),e.pQV(_.getMinSize()),e.QtT(3),e.xp6(2),e.pQV(_.getMaxSize()),e.QtT(5)}}function Ko(t,i){if(1&t&&(e.TgZ(0,"span",41),e.SDv(1,69),e.qZA()),2&t){const _=e.oxw(4);e.xp6(1),e.pQV(_.getMinSize())(_.getMaxSize()),e.QtT(1)}}function Wo(t,i){1&t&&(e.TgZ(0,"span",70),e.SDv(1,71),e.qZA())}function et(t,i){if(1&t&&(e.TgZ(0,"div",8)(1,"label",62),e.SDv(2,63),e.qZA(),e.TgZ(3,"div",11),e._UZ(4,"input",64),e.YNc(5,jo,6,2,"span",13),e.YNc(6,Ko,2,2,"span",13),e.YNc(7,Wo,2,0,"span",65),e.qZA()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(4),e.Q6J("max",o.getMaxSize())("min",o.getMinSize()),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",o.form.showError("size",_)),e.xp6(1),e.Q6J("ngIf",1===o.form.getValue("size"))}}function _t(t,i){1&t&&(e.TgZ(0,"div",8)(1,"label",72),e.SDv(2,73),e.qZA(),e.TgZ(3,"div",11)(4,"div",74),e._UZ(5,"input",75),e.TgZ(6,"label",76),e.SDv(7,77),e.qZA()()()())}function ot(t,i){if(1&t&&(e.TgZ(0,"div")(1,"div",8)(2,"label",48),e.SDv(3,49),e.qZA(),e.TgZ(4,"div",11)(5,"select",50),e.YNc(6,Qo,2,2,"option",19),e.qZA()()(),e.YNc(7,Bo,11,4,"div",51),e.YNc(8,et,8,5,"div",51),e.YNc(9,_t,8,0,"div",51),e.qZA()),2&t){const _=e.oxw(2);e.xp6(6),e.Q6J("ngForOf",_.pgAutoscaleModes),e.xp6(1),e.Q6J("ngIf","on"!==_.form.getValue("pgAutoscaleMode")),e.xp6(1),e.Q6J("ngIf",_.isReplicated),e.xp6(1),e.Q6J("ngIf",_.info.is_all_bluestore&&_.isErasure)}}function tt(t,i){if(1&t&&e._UZ(0,"i",78),2&t){const _=e.oxw(2);e.Gre("",_.icons.warning," icon-warning-color")}}function it(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,92),e.qZA())}function nt(t,i){1&t&&(e.TgZ(0,"option",93),e.SDv(1,94),e.qZA()),2&t&&e.Q6J("ngValue",null)}function 
st(t,i){1&t&&(e.TgZ(0,"option",93),e.SDv(1,95),e.qZA()),2&t&&e.Q6J("ngValue",null)}function at(t,i){if(1&t&&(e.TgZ(0,"option",93),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.name," ")}}const F=function(t){return[t]};function lt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",96),e.NdJ("click",function(){return e.CHM(_),e.oxw(4).addErasureCodeProfile()}),e._UZ(1,"i",88),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function rt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",97,98),e.NdJ("click",function(){return e.CHM(_),e.oxw(4).deleteErasureCodeProfile()}),e._UZ(2,"i",88),e.qZA()}if(2&t){const _=e.oxw(4);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ct=function(){return["name"]};function Ot(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",109),2&t){const _=e.oxw(5);e.Q6J("renderObjects",!0)("hideKeys",e.DdM(4,ct))("data",_.form.getValue("erasureProfile"))("autoReload",!1)}}function dt(t,i){1&t&&(e.TgZ(0,"span"),e.SDv(1,112),e.qZA())}function ut(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.xp6(1),e.hij(" ",_," ")}}function Pt(t,i){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,ut,2,1,"li",113),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.ecpUsage)}}function ft(t,i){if(1&t&&(e.YNc(0,dt,2,0,"ng-template",null,110,e.W1O),e.YNc(2,Pt,2,1,"ul",111)),2&t){const _=e.MAs(1),o=e.oxw(5);e.xp6(2),e.Q6J("ngIf",o.ecpUsage)("ngIfElse",_)}}function Et(t,i){if(1&t&&(e.TgZ(0,"span",99)(1,"nav",100,101),e.ynx(3,102),e.TgZ(4,"a",103),e.SDv(5,104),e.qZA(),e.YNc(6,Ot,1,5,"ng-template",105),e.BQk(),e.ynx(7,106),e.TgZ(8,"a",103),e.SDv(9,107),e.qZA(),e.YNc(10,ft,3,2,"ng-template",105),e.BQk(),e.qZA(),e._UZ(11,"div",108),e.qZA()),2&t){const _=e.MAs(2);e.xp6(11),e.Q6J("ngbNavOutlet",_)}}const Ze=function(t){return{active:t}};function gt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",8)(1,"label",80),e.SDv(2,81),e.qZA(),e.TgZ(3,"div",11)(4,"div",82)(5,"select",83),e.YNc(6,it,2,0,"option",84),e.YNc(7,nt,2,1,"option",85),e.YNc(8,st,2,1,"option",85),e.YNc(9,at,2,2,"option",86),e.qZA(),e.TgZ(10,"button",87),e.NdJ("click",function(){e.CHM(_);const n=e.oxw(3);return n.data.erasureInfo=!n.data.erasureInfo}),e._UZ(11,"i",88),e.qZA(),e.YNc(12,lt,2,3,"button",89),e.YNc(13,rt,3,3,"button",90),e.qZA(),e.YNc(14,Et,12,1,"span",91),e.qZA()()}if(2&t){const _=e.oxw(3);e.xp6(6),e.Q6J("ngIf",!_.ecProfiles),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&0===_.ecProfiles.length),e.xp6(1),e.Q6J("ngIf",_.ecProfiles&&_.ecProfiles.length>0),e.xp6(1),e.Q6J("ngForOf",_.ecProfiles),e.xp6(1),e.Q6J("ngClass",e.VKq(9,Ze,_.data.erasureInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,_.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",!_.editing),e.xp6(1),e.Q6J("ngIf",_.data.erasureInfo&&_.form.getValue("erasureProfile"))}}function pt(t,i){1&t&&(e.TgZ(0,"div",8)(1,"label",114),e.SDv(2,115),e.qZA(),e.TgZ(3,"div",11)(4,"span",55),e.SDv(5,116),e.qZA()()())}function Rt(t,i){1&t&&(e.TgZ(0,"span",55)(1,"span"),e.SDv(2,119),e.qZA(),e._uU(3,"\xa0 "),e.qZA())}function mt(t,i){if(1&t&&(e.TgZ(0,"option",93),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("ngValue",_),e.xp6(1),e.hij(" ",_.rule_name," ")}}function Ct(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",96),e.NdJ("click",function(){return e.CHM(_),e.oxw(5).addCrushRule()}),e._UZ(1,"i",88),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(1),e.Q6J("ngClass",e.VKq(1,F,_.icons.add))}}function Mt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"button",126,127),e.NdJ("click",function(){return 
e.CHM(_),e.oxw(5).deleteCrushRule()}),e._UZ(2,"i",88),e.qZA()}if(2&t){const _=e.oxw(5);e.xp6(2),e.Q6J("ngClass",e.VKq(1,F,_.icons.trash))}}const ht=function(){return["steps","type","rule_name"]};function St(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",109),2&t){const _=e.oxw(6);e.Q6J("renderObjects",!1)("hideKeys",e.DdM(4,ht))("data",_.form.getValue("crushRule"))("autoReload",!1)}}function Tt(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit,o=e.oxw(7);e.xp6(1),e.hij(" ",o.describeCrushStep(_)," ")}}function Lt(t,i){if(1&t&&(e.TgZ(0,"ol"),e.YNc(1,Tt,2,1,"li",113),e.qZA()),2&t){const _=e.oxw(6);e.xp6(1),e.Q6J("ngForOf",_.form.get("crushRule").value.steps)}}function At(t,i){1&t&&(e.TgZ(0,"span"),e.SDv(1,136),e.qZA())}function Ft(t,i){if(1&t&&(e.TgZ(0,"li"),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.xp6(1),e.hij(" ",_," ")}}function Nt(t,i){if(1&t&&(e.TgZ(0,"ul"),e.YNc(1,Ft,2,1,"li",113),e.qZA()),2&t){const _=e.oxw(7);e.xp6(1),e.Q6J("ngForOf",_.crushUsage)}}function bt(t,i){if(1&t&&(e.YNc(0,At,2,0,"ng-template",null,135,e.W1O),e.YNc(2,Nt,2,1,"ul",111)),2&t){const _=e.MAs(1),o=e.oxw(6);e.xp6(2),e.Q6J("ngIf",o.crushUsage)("ngIfElse",_)}}function vt(t,i){if(1&t&&(e.TgZ(0,"div",128)(1,"nav",100,129),e.ynx(3,130),e.TgZ(4,"a",103),e.SDv(5,131),e.qZA(),e.YNc(6,St,1,5,"ng-template",105),e.BQk(),e.ynx(7,132),e.TgZ(8,"a",103),e.SDv(9,133),e.qZA(),e.YNc(10,Lt,2,1,"ng-template",105),e.BQk(),e.ynx(11,106),e.TgZ(12,"a",103),e.SDv(13,134),e.qZA(),e.YNc(14,bt,3,2,"ng-template",105),e.BQk(),e.qZA(),e._UZ(15,"div",108),e.qZA()),2&t){const _=e.MAs(2);e.xp6(15),e.Q6J("ngbNavOutlet",_)}}function It(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,137),e.qZA())}function $t(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,138),e.qZA())}function Dt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div")(1,"div",120)(2,"select",121)(3,"option",93),e.SDv(4,122),e.qZA(),e.YNc(5,mt,2,2,"option",86),e.qZA(),e.TgZ(6,"button",123),e.NdJ("click",function(){e.CHM(_);const n=e.oxw(4);return n.data.crushInfo=!n.data.crushInfo}),e._UZ(7,"i",88),e.qZA(),e.YNc(8,Ct,2,3,"button",89),e.YNc(9,Mt,3,3,"button",124),e.qZA(),e.YNc(10,vt,16,1,"div",125),e.YNc(11,It,2,0,"span",13),e.YNc(12,$t,2,0,"span",13),e.qZA()}if(2&t){e.oxw(3);const _=e.MAs(2),o=e.oxw();e.xp6(3),e.Q6J("ngValue",null),e.xp6(2),e.Q6J("ngForOf",o.current.rules),e.xp6(1),e.Q6J("ngClass",e.VKq(9,Ze,o.data.crushInfo)),e.xp6(1),e.Q6J("ngClass",e.VKq(11,F,o.icons.questionCircle)),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.isReplicated&&!o.editing),e.xp6(1),e.Q6J("ngIf",o.data.crushInfo&&o.form.getValue("crushRule")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("crushRule",_,"tooFewOsds"))}}function xt(t,i){if(1&t&&(e.TgZ(0,"div",8)(1,"label",114),e.SDv(2,117),e.qZA(),e.TgZ(3,"div",11),e.YNc(4,Rt,4,0,"ng-template",null,118,e.W1O),e.YNc(6,Dt,13,13,"div",111),e.qZA()()),2&t){const _=e.MAs(5),o=e.oxw(3);e.xp6(6),e.Q6J("ngIf",o.current.rules.length>0)("ngIfElse",_)}}function yt(t,i){if(1&t&&(e.TgZ(0,"div")(1,"legend"),e.SDv(2,79),e.qZA(),e.YNc(3,gt,15,13,"div",51),e.YNc(4,pt,6,0,"div",51),e.YNc(5,xt,7,2,"div",51),e.qZA()),2&t){const _=e.oxw(2);e.xp6(3),e.Q6J("ngIf",_.isErasure),e.xp6(1),e.Q6J("ngIf",_.isErasure&&!_.editing),e.xp6(1),e.Q6J("ngIf",_.isReplicated||_.editing)}}function Zt(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function Ut(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,156),e.qZA())}function 
Ht(t,i){1&t&&(e.TgZ(0,"option",17),e.SDv(1,157),e.qZA())}function Gt(t,i){if(1&t&&(e.TgZ(0,"option",46),e._uU(1),e.qZA()),2&t){const _=i.$implicit;e.Q6J("value",_),e.xp6(1),e.hij(" ",_," ")}}function zt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,158),e.qZA())}function qt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,159),e.qZA())}function Xt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,160),e.qZA())}function wt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,161),e.qZA())}function Qt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,162),e.qZA())}function Jt(t,i){if(1&t&&(e.TgZ(0,"div")(1,"div",8)(2,"label",144),e.SDv(3,145),e.qZA(),e.TgZ(4,"div",11)(5,"select",146),e.YNc(6,Ut,2,0,"option",84),e.YNc(7,Ht,2,0,"option",84),e.YNc(8,Gt,2,2,"option",19),e.qZA()()(),e.TgZ(9,"div",8)(10,"label",147),e.SDv(11,148),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",149),e.YNc(14,zt,2,0,"span",13),e.YNc(15,qt,2,0,"span",13),e.qZA()(),e.TgZ(16,"div",8)(17,"label",150),e.SDv(18,151),e.qZA(),e.TgZ(19,"div",11),e._UZ(20,"input",152),e.YNc(21,Xt,2,0,"span",13),e.YNc(22,wt,2,0,"span",13),e.qZA()(),e.TgZ(23,"div",8)(24,"label",153),e.SDv(25,154),e.qZA(),e.TgZ(26,"div",11),e._UZ(27,"input",155),e.YNc(28,Qt,2,0,"span",13),e.qZA()()()),2&t){e.oxw(2);const _=e.MAs(2),o=e.oxw();e.xp6(6),e.Q6J("ngIf",!o.info.compression_algorithms),e.xp6(1),e.Q6J("ngIf",o.info.compression_algorithms&&0===o.info.compression_algorithms.length),e.xp6(1),e.Q6J("ngForOf",o.info.compression_algorithms),e.xp6(6),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("minBlobSize",_,"maximum")),e.xp6(6),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"min")),e.xp6(1),e.Q6J("ngIf",o.form.showError("maxBlobSize",_,"minimum")),e.xp6(6),e.Q6J("ngIf",o.form.showError("ratio",_,"min")||o.form.showError("ratio",_,"max"))}}function kt(t,i){if(1&t&&(e.TgZ(0,"div",139)(1,"legend"),e.SDv(2,140),e.qZA(),e.TgZ(3,"div",8)(4,"label",141),e.SDv(5,142),e.qZA(),e.TgZ(6,"div",11)(7,"select",143),e.YNc(8,Zt,2,2,"option",19),e.qZA()()(),e.YNc(9,Jt,29,8,"div",20),e.qZA()),2&t){const _=e.oxw(2);e.xp6(8),e.Q6J("ngForOf",_.info.compression_modes),e.xp6(1),e.Q6J("ngIf",_.hasCompressionEnabled())}}function Vt(t,i){1&t&&(e.TgZ(0,"span",41),e.SDv(1,163),e.qZA())}function Yt(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"div",1)(1,"form",2,3)(3,"div",4)(4,"div",5),e.SDv(5,6),e.ALo(6,"titlecase"),e.ALo(7,"upperFirst"),e.qZA(),e.TgZ(8,"div",7)(9,"div",8)(10,"label",9),e.SDv(11,10),e.qZA(),e.TgZ(12,"div",11),e._UZ(13,"input",12),e.YNc(14,Ho,2,0,"span",13),e.YNc(15,Go,2,0,"span",13),e.YNc(16,zo,2,0,"span",13),e.YNc(17,qo,2,0,"span",13),e.qZA()(),e.TgZ(18,"div",8)(19,"label",14),e.SDv(20,15),e.qZA(),e.TgZ(21,"div",11)(22,"select",16)(23,"option",17),e.SDv(24,18),e.qZA(),e.YNc(25,Xo,2,2,"option",19),e.qZA(),e.YNc(26,wo,2,0,"span",13),e.qZA()(),e.YNc(27,ot,10,4,"div",20),e.TgZ(28,"div",8)(29,"label",21),e.SDv(30,22),e.qZA(),e.TgZ(31,"div",11)(32,"cd-select-badges",23),e.NdJ("selection",function(){return 
e.CHM(_),e.oxw().appSelection()}),e.qZA(),e.YNc(33,tt,1,3,"i",24),e.qZA()(),e.YNc(34,yt,6,3,"div",20),e.YNc(35,kt,10,2,"div",25),e.TgZ(36,"div")(37,"legend"),e.SDv(38,26),e.qZA(),e.TgZ(39,"div",8)(40,"label",27),e.ynx(41),e.SDv(42,28),e.BQk(),e.TgZ(43,"cd-helper")(44,"span"),e.SDv(45,29),e.qZA(),e._UZ(46,"br"),e.TgZ(47,"span"),e.SDv(48,30),e.qZA()()(),e.TgZ(49,"div",11),e._UZ(50,"input",31),e.qZA()(),e.TgZ(51,"div",8)(52,"label",32),e.ynx(53),e.SDv(54,33),e.BQk(),e.TgZ(55,"cd-helper")(56,"span"),e.SDv(57,34),e.qZA(),e._UZ(58,"br"),e.TgZ(59,"span"),e.SDv(60,35),e.qZA()()(),e.TgZ(61,"div",11),e._UZ(62,"input",36),e.YNc(63,Vt,2,0,"span",13),e.qZA()()(),e.TgZ(64,"div",37)(65,"cd-rbd-configuration-form",38),e.NdJ("changes",function(n){return e.CHM(_),e.oxw().currentConfigurationValues=n()}),e.qZA()()(),e.TgZ(66,"div",39)(67,"cd-form-button-panel",40),e.NdJ("submitActionEvent",function(){return e.CHM(_),e.oxw().submit()}),e.ALo(68,"titlecase"),e.ALo(69,"upperFirst"),e.qZA()()()()()}if(2&t){const _=e.MAs(2),o=e.oxw();e.xp6(1),e.Q6J("formGroup",o.form),e.xp6(6),e.pQV(e.lcZ(6,25,o.action))(e.lcZ(7,27,o.resource)),e.QtT(5),e.xp6(7),e.Q6J("ngIf",o.form.showError("name",_,"required")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"uniqueName")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"rbdPool")),e.xp6(1),e.Q6J("ngIf",o.form.showError("name",_,"pattern")),e.xp6(8),e.Q6J("ngForOf",o.data.poolTypes),e.xp6(1),e.Q6J("ngIf",o.form.showError("poolType",_,"required")),e.xp6(1),e.Q6J("ngIf",o.isReplicated||o.isErasure),e.xp6(5),e.Q6J("customBadges",!0)("customBadgeValidators",o.data.applications.validators)("messages",o.data.applications.messages)("data",o.data.applications.selected)("options",o.data.applications.available)("selectionLimit",4),e.xp6(1),e.Q6J("ngIf",o.data.applications.selected<=0),e.xp6(1),e.Q6J("ngIf",o.isErasure||o.isReplicated),e.xp6(1),e.Q6J("ngIf",o.info.is_all_bluestore),e.xp6(28),e.Q6J("ngIf",o.form.showError("max_objects",_,"min")),e.xp6(1),e.Q6J("hidden",o.isErasure||-1===o.data.applications.selected.indexOf("rbd")),e.xp6(1),e.Q6J("form",o.form)("initializeData",o.initializeConfigData),e.xp6(2),e.Q6J("form",o.form)("submitText",e.lcZ(68,29,o.action)+" "+e.lcZ(69,31,o.resource))}}let Ue=(()=>{class t extends R_.E{constructor(_,o,n,s,c,d,f,p,R,h,S){super(),this.dimlessBinaryPipe=_,this.route=o,this.router=n,this.modalService=s,this.poolService=c,this.authStorageService=d,this.formatter=f,this.taskWrapper=p,this.ecpService=R,this.crushRuleService=h,this.actionLabels=S,this.editing=!1,this.isReplicated=!1,this.isErasure=!1,this.data=new No,this.externalPgChange=!1,this.current={rules:[]},this.initializeConfigData=new E_.t(1),this.currentConfigurationValues={},this.icons=b.P,this.crushUsage=void 0,this.ecpUsage=void 0,this.crushRuleMaxSize=10,this.editing=this.router.url.startsWith(`/pool/${M.MQ.EDIT}`),this.action=this.editing?this.actionLabels.EDIT:this.actionLabels.CREATE,this.resource="pool",this.authenticate(),this.createForm()}authenticate(){if(this.permission=this.authStorageService.getPermissions().pool,!this.permission.read||!this.permission.update&&this.editing||!this.permission.create&&!this.editing)throw new g_._2}createForm(){const _=new be.d({mode:new a.NI("none"),algorithm:new a.NI(""),minBlobSize:new a.NI("",{updateOn:"blur"}),maxBlobSize:new a.NI("",{updateOn:"blur"}),ratio:new a.NI("",{updateOn:"blur"})});this.form=new be.d({name:new 
a.NI("",{validators:[a.kI.pattern(/^[.A-Za-z0-9_/-]+$/),a.kI.required,E.h.custom("rbdPool",()=>this.form&&this.form.getValue("name").includes("/")&&this.data&&-1!==this.data.applications.selected.indexOf("rbd"))]}),poolType:new a.NI("",{validators:[a.kI.required]}),crushRule:new a.NI(null,{validators:[E.h.custom("tooFewOsds",o=>this.info&&o&&this.info.osd_count<1),E.h.custom("required",o=>this.isReplicated&&this.info.crush_rules_replicated.length>0&&!o)]}),size:new a.NI("",{updateOn:"blur"}),erasureProfile:new a.NI(null),pgNum:new a.NI("",{validators:[a.kI.required]}),pgAutoscaleMode:new a.NI(null),ecOverwrites:new a.NI(!1),compression:_,max_bytes:new a.NI(""),max_objects:new a.NI(0)},[E.h.custom("form",()=>null)])}ngOnInit(){this.poolService.getInfo().subscribe(_=>{this.initInfo(_),this.editing?this.initEditMode():(this.setAvailableApps(),this.loadingReady()),this.listenToChanges(),this.setComplexValidators()})}initInfo(_){this.pgAutoscaleModes=_.pg_autoscale_modes,this.form.silentSet("pgAutoscaleMode",_.pg_autoscale_default_mode),this.form.silentSet("algorithm",_.bluestore_compression_algorithm),this.info=_,this.initEcp(_.erasure_code_profiles)}initEcp(_){this.setListControlStatus("erasureProfile",_),this.ecProfiles=_}setListControlStatus(_,o){const n=this.form.get(_),s=n.value;1!==o.length||s&&u().isEqual(s,o[0])?0===o.length&&s&&n.setValue(null):n.setValue(o[0]),o.length<=1?n.enabled&&n.disable():n.disabled&&n.enable()}initEditMode(){this.disableForEdit(),this.routeParamsSubscribe=this.route.params.subscribe(_=>this.poolService.get(_.name).subscribe(o=>{this.data.pool=o,this.initEditFormData(o),this.loadingReady()}))}disableForEdit(){["poolType","crushRule","size","erasureProfile","ecOverwrites"].forEach(_=>this.form.get(_).disable())}initEditFormData(_){this.initializeConfigData.next({initialData:_.configuration,sourceType:m_.h.pool}),this.poolTypeChange(_.type);const o=this.info.crush_rules_replicated.concat(this.info.crush_rules_erasure),n={name:_.pool_name,poolType:_.type,crushRule:o.find(s=>s.rule_name===_.crush_rule),size:_.size,erasureProfile:this.ecProfiles.find(s=>s.name===_.erasure_code_profile),pgAutoscaleMode:_.pg_autoscale_mode,pgNum:_.pg_num,ecOverwrites:_.flags_names.includes("ec_overwrites"),mode:_.options.compression_mode,algorithm:_.options.compression_algorithm,minBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_min_blob_size),maxBlobSize:this.dimlessBinaryPipe.transform(_.options.compression_max_blob_size),ratio:_.options.compression_required_ratio,max_bytes:this.dimlessBinaryPipe.transform(_.quota_max_bytes),max_objects:_.quota_max_objects};Object.keys(n).forEach(s=>{const c=n[s];!u().isUndefined(c)&&""!==c&&this.form.silentSet(s,c)}),this.data.pgs=this.form.getValue("pgNum"),this.setAvailableApps(this.data.applications.default.concat(_.application_metadata)),this.data.applications.selected=_.application_metadata}setAvailableApps(_=this.data.applications.default){this.data.applications.available=u().uniq(_.sort()).map(o=>new p_.$(!1,o,""))}listenToChanges(){this.listenToChangesDuringAddEdit(),this.editing||this.listenToChangesDuringAdd()}listenToChangesDuringAddEdit(){this.form.get("pgNum").valueChanges.subscribe(_=>{const o=_-this.data.pgs;1===Math.abs(o)&&2!==_?this.doPgPowerJump(o):this.data.pgs=_})}doPgPowerJump(_){const o=this.calculatePgPower()+_;this.setPgs(-1===_?Math.round(o):Math.floor(o))}calculatePgPower(_=this.form.getValue("pgNum")){return Math.log(_)/Math.log(2)}setPgs(_){const 
o=Math.pow(2,_<0?0:_);this.data.pgs=o,this.form.silentSet("pgNum",o)}listenToChangesDuringAdd(){this.form.get("poolType").valueChanges.subscribe(_=>{this.poolTypeChange(_)}),this.form.get("crushRule").valueChanges.subscribe(_=>{this.crushDeletionBtn&&this.crushDeletionBtn.isOpen()&&this.crushDeletionBtn.close(),_&&(this.setCorrectMaxSize(_),this.crushRuleIsUsedBy(_.rule_name),this.replicatedRuleChange(),this.pgCalc())}),this.form.get("size").valueChanges.subscribe(()=>{this.pgCalc()}),this.form.get("erasureProfile").valueChanges.subscribe(_=>{this.ecpDeletionBtn&&this.ecpDeletionBtn.isOpen()&&this.ecpDeletionBtn.close(),_&&(this.ecpIsUsedBy(_.name),this.pgCalc())}),this.form.get("mode").valueChanges.subscribe(()=>{["minBlobSize","maxBlobSize","ratio"].forEach(_=>{this.form.get(_).updateValueAndValidity({emitEvent:!1})})}),this.form.get("minBlobSize").valueChanges.subscribe(()=>{this.form.get("maxBlobSize").updateValueAndValidity({emitEvent:!1})}),this.form.get("maxBlobSize").valueChanges.subscribe(()=>{this.form.get("minBlobSize").updateValueAndValidity({emitEvent:!1})})}poolTypeChange(_){if("replicated"===_?this.setTypeBooleans(!0,!1):this.setTypeBooleans(!1,"erasure"===_),!_||!this.info)return void(this.current.rules=[]);const o=this.info["crush_rules_"+_]||[];this.current.rules=o,!this.editing&&(this.isReplicated&&this.setListControlStatus("crushRule",o),this.replicatedRuleChange(),this.pgCalc())}setTypeBooleans(_,o){this.isReplicated=_,this.isErasure=o}replicatedRuleChange(){if(!this.isReplicated)return;const _=this.form.get("size");let o=this.form.getValue("size")||3;const n=this.getMinSize(),s=this.getMaxSize();os&&(o=s),o!==_.value&&this.form.silentSet("size",o)}getMinSize(){return!this.info||this.info.osd_count<1?0:1}getMaxSize(){const _=this.form.getValue("crushRule");return this.info?_?_.usable_size:Math.min(this.info.osd_count,3):0}pgCalc(){const _=this.form.getValue("poolType");if(!this.info||this.form.get("pgNum").dirty||!_)return;const o=100*this.info.osd_count,n=this.isReplicated?this.replicatedPgCalc(o):this.erasurePgCalc(o);if(!n)return;const s=this.data.pgs;this.alignPgs(n),this.externalPgChange||(this.externalPgChange=s!==this.data.pgs)}setCorrectMaxSize(_=this.form.getValue("crushRule")){if(!_)return;const n=T.searchFailureDomains(this.info.nodes,_.steps[0].item_name)[_.steps[1].type];_.usable_size=Math.min(n?n.length:this.crushRuleMaxSize,this.crushRuleMaxSize)}replicatedPgCalc(_){const o=this.form.get("size"),n=o.value;return o.valid&&n>0?_/n:0}erasurePgCalc(_){const 
o=this.form.get("erasureProfile"),n=o.value;return(o.valid||o.disabled)&&n?_/(n.k+n.m):0}alignPgs(_=this.form.getValue("pgNum")){this.setPgs(Math.round(this.calculatePgPower(_<1?1:_)))}setComplexValidators(){this.editing?this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.data.pool&&this.info&&-1!==this.info.pool_names.indexOf(_)&&this.info.pool_names.indexOf(_)!==this.info.pool_names.indexOf(this.data.pool.pool_name))]):(E.h.validateIf(this.form.get("size"),()=>this.isReplicated,[E.h.custom("min",_=>this.form.getValue("size")&&_this.form.getValue("size")&&this.getMaxSize()<_)]),this.form.get("name").setValidators([this.form.get("name").validator,E.h.custom("uniqueName",_=>this.info&&-1!==this.info.pool_names.indexOf(_))])),this.setCompressionValidators()}setCompressionValidators(){E.h.validateIf(this.form.get("minBlobSize"),()=>this.hasCompressionEnabled(),[a.kI.min(0),E.h.custom("maximum",_=>this.oddBlobSize(_,this.form.getValue("maxBlobSize")))]),E.h.validateIf(this.form.get("maxBlobSize"),()=>this.hasCompressionEnabled(),[a.kI.min(0),E.h.custom("minimum",_=>this.oddBlobSize(this.form.getValue("minBlobSize"),_))]),E.h.validateIf(this.form.get("ratio"),()=>this.hasCompressionEnabled(),[a.kI.min(0),a.kI.max(1)])}oddBlobSize(_,o){const n=this.formatter.toBytes(_),s=this.formatter.toBytes(o);return Boolean(n&&s&&n>=s)}hasCompressionEnabled(){return this.form.getValue("mode")&&"none"!==this.form.get("mode").value.toLowerCase()}describeCrushStep(_){return[_.op.replace("_"," "),_.item_name||"",_.type?_.num+" type "+_.type:""].join(" ")}addErasureCodeProfile(){this.addModal(Ao,_=>this.reloadECPs(_))}addModal(_,o){this.hideOpenTooltips(),this.modalService.show(_).componentInstance.submitAction.subscribe(s=>{o(s.name)})}hideOpenTooltips(){const _=o=>o&&o.isOpen()&&o.close();_(this.ecpDeletionBtn),_(this.crushDeletionBtn)}reloadECPs(_){this.reloadList({newItemName:_,getInfo:()=>this.ecpService.list(),initInfo:o=>this.initEcp(o),findNewItem:()=>this.ecProfiles.find(o=>o.name===_),controlName:"erasureProfile"})}reloadList({newItemName:_,getInfo:o,initInfo:n,findNewItem:s,controlName:c}){this.modalSubscription&&this.modalSubscription.unsubscribe(),o().subscribe(d=>{if(n(d),!_)return;const f=s();f&&this.form.get(c).setValue(f)})}deleteErasureCodeProfile(){this.deletionModal({value:this.form.getValue("erasureProfile"),usage:this.ecpUsage,deletionBtn:this.ecpDeletionBtn,dataName:"erasureInfo",getTabs:()=>this.ecpInfoTabs,tabPosition:"used-by-pools",nameAttribute:"name",itemDescription:"erasure code profile",reloadFn:()=>this.reloadECPs(),deleteFn:_=>this.ecpService.delete(_),taskName:"ecp/delete"})}deletionModal({value:_,usage:o,deletionBtn:n,dataName:s,getTabs:c,tabPosition:d,nameAttribute:f,itemDescription:p,reloadFn:R,deleteFn:h,taskName:S}){if(!_)return;if(o)return n.animation=!1,n.toggle(),this.data[s]=!0,void setTimeout(()=>{const P=c();P&&P.select(d)},50);const m=_[f];this.modalService.show(Ne.M,{itemDescription:p,itemNames:[m],submitActionObservable:()=>{const P=h(m);return P.subscribe(()=>R()),this.taskWrapper.wrapTaskAroundCall({task:new 
v.R(S,{name:m}),call:P})}})}addCrushRule(){this.addModal($_,_=>this.reloadCrushRules(_))}reloadCrushRules(_){this.reloadList({newItemName:_,getInfo:()=>this.poolService.getInfo(),initInfo:o=>{this.initInfo(o),this.poolTypeChange("replicated")},findNewItem:()=>this.info.crush_rules_replicated.find(o=>o.rule_name===_),controlName:"crushRule"})}deleteCrushRule(){this.deletionModal({value:this.form.getValue("crushRule"),usage:this.crushUsage,deletionBtn:this.crushDeletionBtn,dataName:"crushInfo",getTabs:()=>this.crushInfoTabs,tabPosition:"used-by-pools",nameAttribute:"rule_name",itemDescription:"crush rule",reloadFn:()=>this.reloadCrushRules(),deleteFn:_=>this.crushRuleService.delete(_),taskName:"crushRule/delete"})}crushRuleIsUsedBy(_){this.crushUsage=_?this.info.used_rules[_]:void 0}ecpIsUsedBy(_){this.ecpUsage=_?this.info.used_profiles[_]:void 0}submit(){if(this.form.invalid)return void this.form.setErrors({cdSubmitButton:!0});const _={pool:this.form.getValue("name")};this.assignFormFields(_,[{externalFieldName:"pool_type",formControlName:"poolType"},{externalFieldName:"pg_autoscale_mode",formControlName:"pgAutoscaleMode",editable:!0},{externalFieldName:"pg_num",formControlName:"pgNum",replaceFn:n=>"on"===this.form.getValue("pgAutoscaleMode")?1:n,editable:!0},this.isReplicated?{externalFieldName:"size",formControlName:"size"}:{externalFieldName:"erasure_code_profile",formControlName:"erasureProfile",attr:"name"},{externalFieldName:"rule_name",formControlName:"crushRule",replaceFn:n=>this.isReplicated?n&&n.rule_name:void 0},{externalFieldName:"quota_max_bytes",formControlName:"max_bytes",replaceFn:this.formatter.toBytes,editable:!0,resetValue:this.editing?0:void 0},{externalFieldName:"quota_max_objects",formControlName:"max_objects",editable:!0,resetValue:this.editing?0:void 0}]),this.info.is_all_bluestore&&(this.assignFormField(_,{externalFieldName:"flags",formControlName:"ecOverwrites",replaceFn:()=>this.isErasure?["ec_overwrites"]:void 0}),"none"!==this.form.getValue("mode")?this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:n=>this.hasCompressionEnabled()&&n},{externalFieldName:"compression_algorithm",formControlName:"algorithm",editable:!0},{externalFieldName:"compression_min_blob_size",formControlName:"minBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_max_blob_size",formControlName:"maxBlobSize",replaceFn:this.formatter.toBytes,editable:!0,resetValue:0},{externalFieldName:"compression_required_ratio",formControlName:"ratio",editable:!0,resetValue:0}]):this.editing&&this.assignFormFields(_,[{externalFieldName:"compression_mode",formControlName:"mode",editable:!0,replaceFn:()=>"unset"},{externalFieldName:"srcpool",formControlName:"name",editable:!0,replaceFn:()=>this.data.pool.pool_name}]));const o=this.data.applications.selected;(o.length>0||this.editing)&&(_.application_metadata=o),this.isReplicated&&!u().isEmpty(this.currentConfigurationValues)&&(_.configuration=this.currentConfigurationValues),this.triggerApiTask(_)}assignFormFields(_,o){o.forEach(n=>this.assignFormField(_,n))}assignFormField(_,{externalFieldName:o,formControlName:n,attr:s,replaceFn:c,editable:d,resetValue:f}){if(this.editing&&(!d||this.form.get(n).pristine))return;const p=this.form.getValue(n);let R=c?c(p):s?u().get(p,s):p;if(!p||!R){if(!d||u().isUndefined(f))return;R=f}_[o]=R}triggerApiTask(_){this.taskWrapper.wrapTaskAroundCall({task:new 
v.R("pool/"+(this.editing?M.MQ.EDIT:M.MQ.CREATE),{pool_name:_.hasOwnProperty("srcpool")?_.srcpool:_.pool}),call:this.poolService[this.editing?M.MQ.UPDATE:M.MQ.CREATE](_)}).subscribe({error:o=>{u().isObject(o.error)&&"34"===o.error.code&&this.form.get("pgNum").setErrors({34:!0}),this.form.setErrors({cdSubmitButton:!0})},complete:()=>this.router.navigate(["/pool"])})}appSelection(){this.form.get("name").updateValueAndValidity({emitEvent:!1,onlySelf:!0})}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(C_.$),e.Y36(Oe.gz),e.Y36(Oe.F0),e.Y36(xe.Z),e.Y36(ue.q),e.Y36(he.j),e.Y36(M_.H),e.Y36(de.P),e.Y36(Me),e.Y36(Ie.H),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-form"]],viewQuery:function(_,o){if(1&_&&(e.Gf(xo,5),e.Gf(yo,5),e.Gf(Zo,5),e.Gf(Uo,5)),2&_){let n;e.iGM(n=e.CRH())&&(o.crushInfoTabs=n.first),e.iGM(n=e.CRH())&&(o.crushDeletionBtn=n.first),e.iGM(n=e.CRH())&&(o.ecpInfoTabs=n.first),e.iGM(n=e.CRH())&&(o.ecpDeletionBtn=n.first)}},features:[e.qOj],decls:1,vars:1,consts:function(){let i,_,o,n,s,c,d,f,p,R,h,S,m,P,A,I,$,D,x,y,Z,U,H,G,z,q,X,w,Q,J,k,V,Y,B,j,K,N,W,ee,_e,oe,te,ie,ne,se,ae,le,re,ce,O,Xe,we,Qe,Je,ke,Ve,Ye,Be,je,Ke,We,e_,__,o_,t_,i_,n_,s_,a_,l_,r_,c_,O_;return i="" + "\ufffd0\ufffd" + " " + "\ufffd1\ufffd" + "",_="Name",o="Name...",n="Pool type",s="-- Select a pool type --",c="Applications",d="Pools should be associated with an application tag",f="Quotas",p="Max bytes",R="Leave it blank or specify 0 to disable this quota.",h="A valid quota should be greater than 0.",S="e.g., 10GiB",m="Max objects",P="Leave it blank or specify 0 to disable this quota.",A="A valid quota should be greater than 0.",I="This field is required!",$="The chosen Ceph pool name is already in use.",D="It's not possible to create an RBD pool with '/' in the name. Please change the name or remove 'rbd' from the applications list.",x="Pool name can only contain letters, numbers, '.', '-', '_' or '/'.",y="This field is required!",Z="PG Autoscale",U="Placement groups",H="Calculation help",G="This field is required!",z="At least one placement group is needed!",q="Your cluster can't handle this many PGs. Please recalculate the PG amount needed.",X="The current PGs settings were calculated for you, you should make sure the values suit your needs before submit.",w="Replicated size",Q="Minimum: " + "\ufffd0\ufffd" + "",J="Maximum: " + "\ufffd0\ufffd" + "",k="The size specified is out of range. A value from " + "\ufffd0\ufffd" + " to " + "\ufffd1\ufffd" + " is usable.",V="A size of 1 will not create a replication of the object. 
The 'Replicated size' includes the object itself.",Y="Flags",B="EC Overwrites",j="CRUSH",K="Erasure code profile",N="This profile can't be deleted as it is in use.",W="Loading...",ee="-- No erasure code profile available --",_e="-- Select an erasure code profile --",oe="Profile",te="Used by pools",ie="Profile is not in use.",ne="Crush ruleset",se="A new crush ruleset will be implicitly created.",ae="Crush ruleset",le="There are no rules.",re="-- Select a crush rule --",ce="Placement and\n replication strategies or distribution policies that allow to\n specify how CRUSH places data replicas.",O="This rule can't be deleted as it is in use.",Xe="Crush rule",we="Crush steps",Qe="Used by pools",Je="Rule is not in use.",ke="This field is required!",Ve="The rule can't be used in the current cluster as it has too few OSDs to meet the minimum required OSD by this rule.",Ye="Compression",Be="Mode",je="Algorithm",Ke="Minimum blob size",We="e.g., 128KiB",e_="Maximum blob size",__="e.g., 512KiB",o_="Ratio",t_="Compression ratio",i_="Loading...",n_="-- No erasure compression algorithm available --",s_="Value should be greater than 0",a_="Value should be less than the maximum blob size",l_="Value should be greater than 0",r_="Value should be greater than the minimum blob size",c_="Value should be between 0.0 and 1.0",O_="The value should be greater or equal to 0",[["class","cd-col-form",4,"cdFormLoading"],[1,"cd-col-form"],["name","form","novalidate","",3,"formGroup"],["formDir","ngForm"],[1,"card"],[1,"card-header"],i,[1,"card-body"],[1,"form-group","row"],["for","name",1,"cd-col-form-label","required"],_,[1,"cd-col-form-input"],["id","name","name","name","type","text","placeholder",o,"formControlName","name","autofocus","",1,"form-control"],["class","invalid-feedback",4,"ngIf"],["for","poolType",1,"cd-col-form-label","required"],n,["id","poolType","formControlName","poolType","name","poolType",1,"form-select"],["ngValue",""],s,[3,"value",4,"ngFor","ngForOf"],[4,"ngIf"],["for","applications",1,"cd-col-form-label"],c,["id","applications",3,"customBadges","customBadgeValidators","messages","data","options","selectionLimit","selection"],["title",d,3,"class",4,"ngIf"],["formGroupName","compression",4,"ngIf"],f,["for","max_bytes",1,"cd-col-form-label"],p,R,h,["id","max_bytes","name","max_bytes","type","text","formControlName","max_bytes","placeholder",S,"defaultUnit","GiB","cdDimlessBinary","",1,"form-control"],["for","max_objects",1,"cd-col-form-label"],m,P,A,["id","max_objects","min","0","name","max_objects","type","number","formControlName","max_objects",1,"form-control"],[3,"hidden"],[3,"form","initializeData","changes"],[1,"card-footer"],["wrappingClass","text-right",3,"form","submitText","submitActionEvent"],[1,"invalid-feedback"],I,$,D,x,[3,"value"],y,["for","pgAutoscaleMode",1,"cd-col-form-label"],Z,["id","pgAutoscaleMode","name","pgAutoscaleMode","formControlName","pgAutoscaleMode",1,"form-select"],["class","form-group row",4,"ngIf"],["for","pgNum",1,"cd-col-form-label","required"],U,["id","pgNum","name","pgNum","formControlName","pgNum","min","1","type","number","required","",1,"form-control",3,"focus","blur"],[1,"form-text","text-muted"],["section","pgs","docText",H],["class","form-text 
text-muted",4,"ngIf"],G,z,q,X,["for","size",1,"cd-col-form-label","required"],w,["id","size","name","size","type","number","formControlName","size",1,"form-control",3,"max","min"],["class","text-warning-dark",4,"ngIf"],[1,"list-inline"],Q,J,k,[1,"text-warning-dark"],V,[1,"cd-col-form-label"],Y,[1,"custom-control","custom-checkbox"],["type","checkbox","id","ec-overwrites","formControlName","ecOverwrites",1,"custom-control-input"],["for","ec-overwrites",1,"custom-control-label"],B,["title",d],j,["for","erasureProfile",1,"cd-col-form-label"],K,[1,"input-group","mb-1"],["id","erasureProfile","name","erasureProfile","formControlName","erasureProfile",1,"form-select"],["ngValue","",4,"ngIf"],[3,"ngValue",4,"ngIf"],[3,"ngValue",4,"ngFor","ngForOf"],["id","ecp-info-button","type","button",1,"btn","btn-light",3,"ngClass","click"],["aria-hidden","true",3,"ngClass"],["class","btn btn-light","type","button",3,"click",4,"ngIf"],["class","btn btn-light","type","button","ngbTooltip",N,"triggers","manual",3,"click",4,"ngIf"],["class","form-text text-muted","id","ecp-info-block",4,"ngIf"],W,[3,"ngValue"],ee,_e,["type","button",1,"btn","btn-light",3,"click"],["type","button","ngbTooltip",N,"triggers","manual",1,"btn","btn-light",3,"click"],["ecpDeletionBtn","ngbTooltip"],["id","ecp-info-block",1,"form-text","text-muted"],["ngbNav","",1,"nav-tabs"],["ecpInfoTabs","ngbNav"],["ngbNavItem","ecp-info"],["ngbNavLink",""],oe,["ngbNavContent",""],["ngbNavItem","used-by-pools"],te,[3,"ngbNavOutlet"],[3,"renderObjects","hideKeys","data","autoReload"],["ecpIsNotUsed",""],[4,"ngIf","ngIfElse"],ie,[4,"ngFor","ngForOf"],["for","crushRule",1,"cd-col-form-label"],ne,se,ae,["noRules",""],le,[1,"input-group"],["id","crushRule","formControlName","crushRule","name","crushSet",1,"form-select"],re,["id","crush-info-button","type","button","ngbTooltip",ce,1,"btn","btn-light",3,"ngClass","click"],["class","btn btn-light","type","button","ngbTooltip",O,"triggers","manual",3,"click",4,"ngIf"],["class","form-text 
text-muted","id","crush-info-block",4,"ngIf"],["type","button","ngbTooltip",O,"triggers","manual",1,"btn","btn-light",3,"click"],["crushDeletionBtn","ngbTooltip"],["id","crush-info-block",1,"form-text","text-muted"],["crushInfoTabs","ngbNav"],["ngbNavItem","crush-rule-info"],Xe,["ngbNavItem","crush-rule-steps"],we,Qe,["ruleIsNotUsed",""],Je,ke,Ve,["formGroupName","compression"],Ye,["for","mode",1,"cd-col-form-label"],Be,["id","mode","name","mode","formControlName","mode",1,"form-select"],["for","algorithm",1,"cd-col-form-label"],je,["id","algorithm","name","algorithm","formControlName","algorithm",1,"form-select"],["for","minBlobSize",1,"cd-col-form-label"],Ke,["id","minBlobSize","name","minBlobSize","formControlName","minBlobSize","type","text","min","0","placeholder",We,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","maxBlobSize",1,"cd-col-form-label"],e_,["id","maxBlobSize","type","text","min","0","formControlName","maxBlobSize","placeholder",__,"defaultUnit","KiB","cdDimlessBinary","",1,"form-control"],["for","ratio",1,"cd-col-form-label"],o_,["id","ratio","name","ratio","formControlName","ratio","type","number","min","0","max","1","step","0.1","placeholder",t_,1,"form-control"],i_,n_,s_,a_,l_,r_,c_,O_]},template:function(_,o){1&_&&e.YNc(0,Yt,70,33,"div",0),2&_&&e.Q6J("cdFormLoading",o.loading)},directives:[bo.y,a._Y,a.JL,Pe.V,a.sg,fe.P,Ee.o,a.Fj,ge.b,a.JJ,a.u,pe.U,C.O5,a.EJ,a.YN,a.Kr,C.sg,a.wV,a.qQ,a.Q7,vo.K,a.Fd,a.Wl,Io.m,C.mk,g._L,g.Pz,g.nv,g.Vx,g.uN,ye.b,g.tO,a.x0,$o.Q,Re.S,Do.d,me.p],pipes:[C.rS,Ce.m],styles:[".icon-warning-color[_ngcontent-%COMP%]{margin-left:3px}"]}),t})();var Bt=r(19773),jt=r(20687),Kt=r(68136),Se=r(69158),Te=r(83697),L=r(99466),Wt=r(91801),ei=r(68774),_i=r(66369),He=r(38047),Le=r(51847);class oi{constructor(i){this.pool_name=i}}var ti=r(64724),ii=r(94928),Ge=r(51295),ni=r(59376),ze=r(76317),si=r(42176);function ai(t,i){if(1&t&&e._UZ(0,"cd-table-key-value",12),2&t){const _=e.oxw(2);e.Q6J("renderObjects",!0)("data",_.poolDetails)("autoReload",!1)}}function li(t,i){if(1&t&&e._UZ(0,"cd-grafana",15),2&t){const _=e.oxw(3);e.MGl("grafanaPath","ceph-pool-detail?var-pool_name=",_.selection.pool_name,""),e.Q6J("type","metrics")}}function ri(t,i){1&t&&(e.ynx(0,13),e.TgZ(1,"a",5),e.SDv(2,14),e.qZA(),e.YNc(3,li,1,2,"ng-template",7),e.BQk())}function ci(t,i){if(1&t&&e._UZ(0,"cd-rbd-configuration-table",18),2&t){const _=e.oxw(3);e.Q6J("data",_.selectedPoolConfiguration)}}function Oi(t,i){1&t&&(e.ynx(0,16),e.TgZ(1,"a",5),e.SDv(2,17),e.qZA(),e.YNc(3,ci,1,1,"ng-template",7),e.BQk())}function di(t,i){if(1&t&&e._UZ(0,"cd-table",21),2&t){const _=e.oxw(3);e.Q6J("data",_.cacheTiers)("columns",_.cacheTierColumns)("autoSave",!1)}}function ui(t,i){1&t&&(e.ynx(0,19),e.TgZ(1,"a",5),e.SDv(2,20),e.qZA(),e.YNc(3,di,1,3,"ng-template",7),e.BQk())}function Pi(t,i){if(1&t&&(e.ynx(0,1),e.TgZ(1,"nav",2,3),e.ynx(3,4),e.TgZ(4,"a",5),e.SDv(5,6),e.qZA(),e.YNc(6,ai,1,3,"ng-template",7),e.BQk(),e.YNc(7,ri,4,0,"ng-container",8),e.YNc(8,Oi,4,0,"ng-container",9),e.YNc(9,ui,4,0,"ng-container",10),e.qZA(),e._UZ(10,"div",11),e.BQk()),2&t){const _=e.MAs(2),o=e.oxw();e.xp6(7),e.Q6J("ngIf",o.permissions.grafana.read),e.xp6(1),e.Q6J("ngIf","replicated"===o.selection.type),e.xp6(1),e.Q6J("ngIf",(null==o.selection.tiers?null:o.selection.tiers.length)>0),e.xp6(1),e.Q6J("ngbNavOutlet",_)}}let fi=(()=>{class 
t{constructor(_){this.poolService=_,this.cacheTierColumns=[],this.omittedPoolAttributes=["cdExecuting","cdIsBinary","stats"],this.cacheTierColumns=[{prop:"pool_name",name:"Name",flexGrow:3},{prop:"cache_mode",name:"Cache Mode",flexGrow:2},{prop:"cache_min_evict_age",name:"Min Evict Age",flexGrow:2},{prop:"cache_min_flush_age",name:"Min Flush Age",flexGrow:2},{prop:"target_max_bytes",name:"Target Max Bytes",flexGrow:2},{prop:"target_max_objects",name:"Target Max Objects",flexGrow:2}]}ngOnChanges(){this.selection&&(this.poolService.getConfiguration(this.selection.pool_name).subscribe(_=>{Ge.T.updateChanged(this,{selectedPoolConfiguration:_})}),Ge.T.updateChanged(this,{poolDetails:u().omit(this.selection,this.omittedPoolAttributes)}))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ue.q))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-details"]],inputs:{cacheTiers:"cacheTiers",permissions:"permissions",selection:"selection"},features:[e.TTD],decls:1,vars:1,consts:function(){let i,_,o,n,s;return i="Details",_="Performance Details",o="Pool details",n="Configuration",s="Cache Tiers Details",[["cdTableDetail","",4,"ngIf"],["cdTableDetail",""],["ngbNav","","cdStatefulTab","pool-details",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem","details"],["ngbNavLink",""],i,["ngbNavContent",""],["ngbNavItem","performance-details",4,"ngIf"],["ngbNavItem","configuration",4,"ngIf"],["ngbNavItem","cache-tiers-details",4,"ngIf"],[3,"ngbNavOutlet"],[3,"renderObjects","data","autoReload"],["ngbNavItem","performance-details"],_,["title",o,"uid","-xyV8KCiz","grafanaStyle","three",3,"grafanaPath","type"],["ngbNavItem","configuration"],n,[3,"data"],["ngbNavItem","cache-tiers-details"],s,["columnMode","flex",3,"data","columns","autoSave"]]},template:function(_,o){1&_&&e.YNc(0,Pi,11,4,"ng-container",0),2&_&&e.Q6J("ngIf",o.selection)},directives:[C.O5,g.Pz,ni.m,g.nv,g.Vx,g.uN,ye.b,ze.F,si.P,Te.a,g.tO],styles:[""],changeDetection:0}),t})(),Ei=(()=>{class t{constructor(_,o,n){this.templateRef=_,this.viewContainer=o,this.authStorageService=n,this.cdScopeMatchAll=!0}set cdScope(_){this.permissions=this.authStorageService.getPermissions(),this.isAuthorized(_)?this.viewContainer.createEmbeddedView(this.templateRef):this.viewContainer.clear()}isAuthorized(_){const o=this.cdScopeMatchAll?u().every:u().some;return u().isString(_)?u().get(this.permissions,[_,"read"],!1):u().isArray(_)?o(_,n=>this.permissions[n].read):!!u().isObject(_)&&o(_,(n,s)=>o(n,c=>this.permissions[s][c]))}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(e.Rgc),e.Y36(e.s_b),e.Y36(he.j))},t.\u0275dir=e.lG2({type:t,selectors:[["","cdScope",""]],inputs:{cdScope:"cdScope",cdScopeMatchAll:"cdScopeMatchAll"}}),t})();var gi=r(60251);const pi=["poolUsageTpl"],Ri=["poolConfigurationSourceTpl"];function mi(t,i){if(1&t){const _=e.EpF();e.TgZ(0,"cd-table",9,10),e.NdJ("fetchData",function(){return e.CHM(_),e.oxw().taskListService.fetch()})("setExpandedRow",function(n){return e.CHM(_),e.oxw().setExpandedRow(n)})("updateSelection",function(n){return e.CHM(_),e.oxw().updateSelection(n)}),e._UZ(2,"cd-table-actions",11)(3,"cd-pool-details",12),e.qZA()}if(2&t){const _=e.oxw();e.Q6J("data",_.pools)("columns",_.columns)("hasDetails",!0)("status",_.tableStatus)("autoReload",-1),e.xp6(2),e.Q6J("permission",_.permissions.pool)("selection",_.selection)("tableActions",_.tableActions),e.xp6(1),e.Q6J("selection",_.expandedRow)("permissions",_.permissions)("cacheTiers",_.cacheTiers)}}function 
Ci(t,i){1&t&&e._UZ(0,"cd-grafana",14),2&t&&e.Q6J("grafanaPath","ceph-pools-overview?")("type","metrics")}function Mi(t,i){1&t&&(e.ynx(0,2),e.TgZ(1,"a",3),e.SDv(2,13),e.qZA(),e.YNc(3,Ci,1,2,"ng-template",5),e.BQk())}function hi(t,i){if(1&t&&e._UZ(0,"cd-usage-bar",16),2&t){const _=e.oxw().row;e.Q6J("total",_.stats.bytes_used.latest+_.stats.avail_raw.latest)("used",_.stats.bytes_used.latest)("title",_.pool_name)}}function Si(t,i){if(1&t&&e.YNc(0,hi,1,3,"cd-usage-bar",15),2&t){const _=i.row;e.Q6J("ngIf",null==_.stats||null==_.stats.avail_raw?null:_.stats.avail_raw.latest)}}let Ti=(()=>{class t extends Kt.o{constructor(_,o,n,s,c,d,f,p,R,h,S){super(),this.poolService=_,this.taskWrapper=o,this.ecpService=n,this.authStorageService=s,this.taskListService=c,this.modalService=d,this.pgCategoryService=f,this.dimlessPipe=p,this.urlBuilder=R,this.configurationService=h,this.actionLabels=S,this.selection=new ei.r,this.executingTasks=[],this.tableStatus=new Se.E,this.cacheTiers=[],this.monAllowPoolDelete=!1,this.permissions=this.authStorageService.getPermissions(),this.tableActions=[{permission:"create",icon:b.P.add,routerLink:()=>this.urlBuilder.getCreate(),name:this.actionLabels.CREATE},{permission:"update",icon:b.P.edit,routerLink:()=>this.urlBuilder.getEdit(encodeURIComponent(this.selection.first().pool_name)),name:this.actionLabels.EDIT},{permission:"delete",icon:b.P.destroy,click:()=>this.deletePoolModal(),name:this.actionLabels.DELETE,disable:this.getDisableDesc.bind(this)}],this.permissions.configOpt.read&&this.configurationService.get("mon_allow_pool_delete").subscribe(m=>{if(u().has(m,"value")){const P=u().find(m.value,A=>"mon"===A.section)||{value:!1};this.monAllowPoolDelete="true"===P.value}})}ngOnInit(){const _=(o,n,s)=>u().get(n,o)>u().get(s,o)?1:-1;this.columns=[{prop:"pool_name",name:"Name",flexGrow:4,cellTransformation:L.e.executing},{prop:"data_protection",name:"Data Protection",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-gray"},flexGrow:1.3},{prop:"application_metadata",name:"Applications",cellTransformation:L.e.badge,customTemplateConfig:{class:"badge-background-primary"},flexGrow:1.5},{prop:"pg_status",name:"PG Status",flexGrow:1.2,cellClass:({row:o,column:n,value:s})=>this.getPgStatusCellClass(o,n,s)},{prop:"crush_rule",name:"Crush Ruleset",isHidden:!0,flexGrow:2},{name:"Usage",prop:"usage",cellTemplate:this.poolUsageTpl,flexGrow:1.2},{prop:"stats.rd_bytes.rates",name:"Read bytes",comparator:(o,n,s,c)=>_("stats.rd_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.wr_bytes.rates",name:"Write bytes",comparator:(o,n,s,c)=>_("stats.wr_bytes.latest",s,c),cellTransformation:L.e.sparkline,flexGrow:1.5},{prop:"stats.rd.rate",name:"Read ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond},{prop:"stats.wr.rate",name:"Write ops",flexGrow:1,pipe:this.dimlessPipe,cellTransformation:L.e.perSecond}],this.taskListService.init(()=>this.ecpService.list().pipe((0,Bt.zg)(o=>(this.ecProfileList=o,this.poolService.getList()))),void 0,o=>{this.pools=this.transformPoolsData(o),this.tableStatus=new Se.E},()=>{this.table.reset(),this.tableStatus=new Se.E(Wt.T.ValueException)},o=>o.name.startsWith("pool/"),(o,n)=>n.metadata.pool_name===o.pool_name,{default:o=>new oi(o.pool_name)})}updateSelection(_){this.selection=_}deletePoolModal(){const _=this.selection.first().pool_name;this.modalService.show(Ne.M,{itemDescription:"Pool",itemNames:[_],submitActionObservable:()=>this.taskWrapper.wrapTaskAroundCall({task:new 
v.R(`pool/${M.MQ.DELETE}`,{pool_name:_}),call:this.poolService.delete(_)})})}getPgStatusCellClass(_,o,n){return{"text-right":!0,[`pg-${this.pgCategoryService.getTypeByStates(n)}`]:!0}}getErasureCodeProfile(_){let o="";return u().forEach(this.ecProfileList,n=>{n.name===_&&(o=`EC: ${n.k}+${n.m}`)}),o}transformPoolsData(_){const o=["bytes_used","max_avail","avail_raw","percent_used","rd_bytes","wr_bytes","rd","wr"],n={latest:0,rate:0,rates:[]};return u().forEach(_,s=>{s.pg_status=this.transformPgStatus(s.pg_status);const c={};u().forEach(o,d=>{c[d]=s.stats&&s.stats[d]?s.stats[d]:n}),s.stats=c,s.usage=c.percent_used.latest,!s.cdExecuting&&s.pg_num+s.pg_placement_num!==s.pg_num_target+s.pg_placement_num_target&&(s.cdExecuting="Updating"),["rd_bytes","wr_bytes"].forEach(d=>{s.stats[d].rates=s.stats[d].rates.map(f=>f[1])}),s.cdIsBinary=!0,"erasure"===s.type&&(s.data_protection=this.getErasureCodeProfile(s.erasure_code_profile)),"replicated"===s.type&&(s.data_protection=`replica: \xd7${s.size}`)}),_}transformPgStatus(_){const o=[];return u().forEach(_,(n,s)=>{o.push(`${n} ${s}`)}),o.join(", ")}getSelectionTiers(){if(void 0!==this.expandedRow){const _=this.expandedRow.tiers;this.cacheTiers=this.pools.filter(o=>_.includes(o.pool))}}getDisableDesc(){var _;return!(null===(_=this.selection)||void 0===_?void 0:_.hasSelection)||!this.monAllowPoolDelete&&"Pool deletion is disabled by the mon_allow_pool_delete configuration setting."}setExpandedRow(_){super.setExpandedRow(_),this.getSelectionTiers()}}return t.\u0275fac=function(_){return new(_||t)(e.Y36(ue.q),e.Y36(de.P),e.Y36(Me),e.Y36(he.j),e.Y36(He.j),e.Y36(xe.Z),e.Y36(jt.j),e.Y36(_i.n),e.Y36(Le.F),e.Y36(ti.e),e.Y36(M.p4))},t.\u0275cmp=e.Xpm({type:t,selectors:[["cd-pool-list"]],viewQuery:function(_,o){if(1&_&&(e.Gf(Te.a,5),e.Gf(pi,7),e.Gf(Ri,5)),2&_){let n;e.iGM(n=e.CRH())&&(o.table=n.first),e.iGM(n=e.CRH())&&(o.poolUsageTpl=n.first),e.iGM(n=e.CRH())&&(o.poolConfigurationSourceTpl=n.first)}},features:[e._Bn([He.j,{provide:Le.F,useValue:new Le.F("pool")}]),e.qOj],decls:10,vars:2,consts:function(){let i,_,o;return i="Pools List",_="Overall Performance",o="Ceph pools overview",[["ngbNav","",1,"nav-tabs"],["nav","ngbNav"],["ngbNavItem",""],["ngbNavLink",""],i,["ngbNavContent",""],["ngbNavItem","",4,"cdScope"],[3,"ngbNavOutlet"],["poolUsageTpl",""],["id","pool-list","selectionType","single",3,"data","columns","hasDetails","status","autoReload","fetchData","setExpandedRow","updateSelection"],["table",""],["id","pool-list-actions",1,"table-actions",3,"permission","selection","tableActions"],["cdTableDetail","","id","pool-list-details",3,"selection","permissions","cacheTiers"],_,["title",o,"uid","z99hzWtmk","grafanaStyle","two",3,"grafanaPath","type"],["decimals","2",3,"total","used","title",4,"ngIf"],["decimals","2",3,"total","used","title"]]},template:function(_,o){if(1&_&&(e.TgZ(0,"nav",0,1),e.ynx(2,2),e.TgZ(3,"a",3),e.SDv(4,4),e.qZA(),e.YNc(5,mi,4,11,"ng-template",5),e.BQk(),e.YNc(6,Mi,4,0,"ng-container",6),e.qZA(),e._UZ(7,"div",7),e.YNc(8,Si,1,1,"ng-template",null,8,e.W1O)),2&_){const n=e.MAs(1);e.xp6(6),e.Q6J("cdScope","grafana"),e.xp6(1),e.Q6J("ngbNavOutlet",n)}},directives:[g.Pz,g.nv,g.Vx,g.uN,Te.a,ii.K,fi,Ei,ze.F,g.tO,C.O5,gi.O],styles:["cd-pool-list .pg-clean{color:#008a00} cd-pool-list .pg-working{color:#25828e} cd-pool-list .pg-warning{color:#d48200} cd-pool-list .pg-unknown{color:#dc3545}"]}),t})(),qe=(()=>{class t{}return t.\u0275fac=function(_){return 
new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[P_.t,C.ez,g.Oz,d_.m,Oe.Bz,a.UX,g.HK,u_.BlockModule]]}),t})();const Li=[{path:"",component:Ti},{path:M.MQ.CREATE,component:Ue,data:{breadcrumbs:M.Qn.CREATE}},{path:`${M.MQ.EDIT}/:name`,component:Ue,data:{breadcrumbs:M.Qn.EDIT}}];let Ai=(()=>{class t{}return t.\u0275fac=function(_){return new(_||t)},t.\u0275mod=e.oAB({type:t}),t.\u0275inj=e.cJS({imports:[[qe,Oe.Bz.forChild(Li)]]}),t})()}}]);
\ No newline at end of file
diff --git a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html
index fedb83e8d..472dff728 100644
--- a/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html
+++ b/ceph/src/pybind/mgr/dashboard/frontend/dist/en-US/index.html
@@ -4,7 +4,7 @@
-
+