2026-04-13T17:46:54.405 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-04-13T17:46:54.411 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-13T17:46:54.431 INFO:teuthology.run:Config:
archive_path: /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194
branch: wip-sse-s3-on-v20.2.0
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/jaeger 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '5194'
last_in_suite: false
machine_type: vps
name: supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: wip-sse-s3-on-v20.2.0
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      logical_volumes:
        lv_1:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_2:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_3:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_4:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
      timezone: UTC
      volume_groups:
        vg_nvme:
          pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4
  install:
    ceph:
      flavor: default
      sha1: 0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360
    extra_system_packages:
      deb:
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-18-g0d1a6d86d0e/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-18-g0d1a6d86d0e/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-18-g0d1a6d86d0e/el9.clyso/x86_64
  s3tests:
    sha1: e0c4ff71baef6d5126a0201df5fe54196d89b296
  selinux:
    allowlist:
    - scontext=system_u:system_r:logrotate_t:s0
    - scontext=system_u:system_r:getty_t:s0
  workunit:
    branch: tt-wip-sse-s3-on-v20.2.0
    sha1: b1406032198400da42bafed19de7ca570962438d
owner: supriti
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 3472
sha1: 0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360
sleep_before_teardown: 0
suite: orch:cephadm:smoke-roleless
suite_branch: tt-wip-sse-s3-on-v20.2.0
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_b1406032198400da42bafed19de7ca570962438d/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: b1406032198400da42bafed19de7ca570962438d
targets:
  vm00.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGg0l8Z94VN52mDwAqmXb8CDNuXEHJcIiLiTy2OdNf0+D+iE3qqednrE79f5FEBSPtxYGT3xz5M7bITQYEAq8mc=
  vm01.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBItOgkGQOmf5nAZ+QdTw4c7suzBxoEIJ3yisMPxUP7YPje/5nPUp+4jZE2bd0EfCmLZm3l+piks3l84I3Cyguls=
tasks:
- pexec:
    all:
    - sudo dnf remove nvme-cli -y
    - sudo dnf install nvmetcli nvme-cli -y
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- cephadm.shell:
    host.a:
    - ceph orch apply jaeger
- cephadm.wait_for_service:
    service: elasticsearch
- cephadm.wait_for_service:
    service: jaeger-collector
- cephadm.wait_for_service:
    service: jaeger-query
- cephadm.wait_for_service:
    service: jaeger-agent
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/kshtsk/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-04-13_15:28:06
tube: vps
user: supriti
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.3072398

2026-04-13T17:46:54.432 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_b1406032198400da42bafed19de7ca570962438d/qa; will attempt to use it
2026-04-13T17:46:54.432 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_b1406032198400da42bafed19de7ca570962438d/qa/tasks
2026-04-13T17:46:54.432 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-04-13T17:46:54.432 INFO:teuthology.task.internal:Saving configuration
2026-04-13T17:46:54.438 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-04-13T17:46:54.439 INFO:teuthology.task.internal.check_lock:Checking locks...
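The `repos` entries in the config above pin three Clyso package sources at priority 1 so they win over the stock CentOS repos. A minimal sketch of the dnf repo file those entries would translate to on an el9 node (the repo filename and gpgcheck setting are assumptions, not taken from this run):

    # hypothetical /etc/yum.repos.d/ceph.repo equivalent of the job's 'repos' override
    sudo tee /etc/yum.repos.d/ceph.repo <<'EOF'
    [ceph]
    name=ceph
    baseurl=https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-18-g0d1a6d86d0e/el9.clyso/x86_64
    priority=1
    enabled=1
    gpgcheck=0
    [ceph-noarch]
    name=ceph-noarch
    baseurl=https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-18-g0d1a6d86d0e/el9.clyso/noarch
    priority=1
    enabled=1
    gpgcheck=0
    EOF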
2026-04-13T17:46:54.445 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm00.local', 'description': '/archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-13 17:45:40.486195', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:00', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGg0l8Z94VN52mDwAqmXb8CDNuXEHJcIiLiTy2OdNf0+D+iE3qqednrE79f5FEBSPtxYGT3xz5M7bITQYEAq8mc='}
2026-04-13T17:46:54.452 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm01.local', 'description': '/archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-13 17:45:40.485769', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:01', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBItOgkGQOmf5nAZ+QdTw4c7suzBxoEIJ3yisMPxUP7YPje/5nPUp+4jZE2bd0EfCmLZm3l+piks3l84I3Cyguls='}
2026-04-13T17:46:54.452 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-04-13T17:46:54.453 INFO:teuthology.task.internal:roles: ubuntu@vm00.local - ['host.a', 'client.0']
2026-04-13T17:46:54.453 INFO:teuthology.task.internal:roles: ubuntu@vm01.local - ['host.b', 'client.1']
2026-04-13T17:46:54.453 INFO:teuthology.run_tasks:Running task console_log...
2026-04-13T17:46:54.460 DEBUG:teuthology.task.console_log:vm00 does not support IPMI; excluding
2026-04-13T17:46:54.465 DEBUG:teuthology.task.console_log:vm01 does not support IPMI; excluding
2026-04-13T17:46:54.466 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=<function console_log.<locals>.kill_console_loggers at 0x7f4caccdfeb0>, signals=[15])
2026-04-13T17:46:54.466 INFO:teuthology.run_tasks:Running task internal.connect...
2026-04-13T17:46:54.466 INFO:teuthology.task.internal:Opening connections...
2026-04-13T17:46:54.466 DEBUG:teuthology.task.internal:connecting to ubuntu@vm00.local
2026-04-13T17:46:54.467 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-13T17:46:54.528 DEBUG:teuthology.task.internal:connecting to ubuntu@vm01.local
2026-04-13T17:46:54.528 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-13T17:46:54.588 INFO:teuthology.run_tasks:Running task internal.push_inventory...
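check_lock records each target's ecdsa host key, and the connect task above then opens SSH sessions as ubuntu@<host>. A sketch of pre-seeding known_hosts from those recorded keys so the same connects would verify strictly (the path and the final check are illustrative, not something this run performs):

    # seed known_hosts with the host keys reported by the lock server (illustrative)
    cat >> ~/.ssh/known_hosts <<'EOF'
    vm00.local ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGg0l8Z94VN52mDwAqmXb8CDNuXEHJcIiLiTy2OdNf0+D+iE3qqednrE79f5FEBSPtxYGT3xz5M7bITQYEAq8mc=
    vm01.local ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBItOgkGQOmf5nAZ+QdTw4c7suzBxoEIJ3yisMPxUP7YPje/5nPUp+4jZE2bd0EfCmLZm3l+piks3l84I3Cyguls=
    EOF
    # a strict connect now succeeds without a trust-on-first-use prompt
    ssh -o StrictHostKeyChecking=yes ubuntu@vm00.local true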
2026-04-13T17:46:54.589 DEBUG:teuthology.orchestra.run.vm00:> uname -m
2026-04-13T17:46:54.647 INFO:teuthology.orchestra.run.vm00.stdout:x86_64
2026-04-13T17:46:54.647 DEBUG:teuthology.orchestra.run.vm00:> cat /etc/os-release
2026-04-13T17:46:54.703 INFO:teuthology.orchestra.run.vm00.stdout:NAME="CentOS Stream"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:VERSION="9"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:ID="centos"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:ID_LIKE="rhel fedora"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:VERSION_ID="9"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:PLATFORM_ID="platform:el9"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:PRETTY_NAME="CentOS Stream 9"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:ANSI_COLOR="0;31"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:LOGO="fedora-logo-icon"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:HOME_URL="https://centos.org/"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-04-13T17:46:54.704 INFO:teuthology.orchestra.run.vm00.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-04-13T17:46:54.704 INFO:teuthology.lock.ops:Updating vm00.local on lock server
2026-04-13T17:46:54.709 DEBUG:teuthology.orchestra.run.vm01:> uname -m
2026-04-13T17:46:54.724 INFO:teuthology.orchestra.run.vm01.stdout:x86_64
2026-04-13T17:46:54.724 DEBUG:teuthology.orchestra.run.vm01:> cat /etc/os-release
2026-04-13T17:46:54.780 INFO:teuthology.orchestra.run.vm01.stdout:NAME="CentOS Stream"
2026-04-13T17:46:54.780 INFO:teuthology.orchestra.run.vm01.stdout:VERSION="9"
2026-04-13T17:46:54.780 INFO:teuthology.orchestra.run.vm01.stdout:ID="centos"
2026-04-13T17:46:54.780 INFO:teuthology.orchestra.run.vm01.stdout:ID_LIKE="rhel fedora"
2026-04-13T17:46:54.780 INFO:teuthology.orchestra.run.vm01.stdout:VERSION_ID="9"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:PLATFORM_ID="platform:el9"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:PRETTY_NAME="CentOS Stream 9"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:ANSI_COLOR="0;31"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:LOGO="fedora-logo-icon"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:HOME_URL="https://centos.org/"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-04-13T17:46:54.781 INFO:teuthology.orchestra.run.vm01.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-04-13T17:46:54.781 INFO:teuthology.lock.ops:Updating vm01.local on lock server
2026-04-13T17:46:54.786 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-04-13T17:46:54.787 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-04-13T17:46:54.788 INFO:teuthology.task.internal:Checking for old test directory...
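push_inventory above derives the arch and distro tuple it pushes to the lock server from `uname -m` plus /etc/os-release. The same probe in plain shell, for reference (variable names follow os-release(5); /etc/os-release is a shell-sourceable key=value file):

    # ID and VERSION_ID give the distro tuple teuthology reports (centos 9 here)
    arch=$(uname -m)
    . /etc/os-release
    echo "$arch $ID $VERSION_ID"   # -> x86_64 centos 9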
2026-04-13T17:46:54.788 DEBUG:teuthology.orchestra.run.vm00:> test '!' -e /home/ubuntu/cephtest
2026-04-13T17:46:54.790 DEBUG:teuthology.orchestra.run.vm01:> test '!' -e /home/ubuntu/cephtest
2026-04-13T17:46:54.840 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-04-13T17:46:54.842 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-04-13T17:46:54.842 DEBUG:teuthology.orchestra.run.vm00:> test -z $(ls -A /var/lib/ceph)
2026-04-13T17:46:54.847 DEBUG:teuthology.orchestra.run.vm01:> test -z $(ls -A /var/lib/ceph)
2026-04-13T17:46:54.860 INFO:teuthology.orchestra.run.vm00.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-04-13T17:46:54.898 INFO:teuthology.orchestra.run.vm01.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-04-13T17:46:54.898 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-04-13T17:46:54.906 DEBUG:teuthology.orchestra.run.vm00:> test -e /ceph-qa-ready
2026-04-13T17:46:54.922 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-13T17:46:55.132 DEBUG:teuthology.orchestra.run.vm01:> test -e /ceph-qa-ready
2026-04-13T17:46:55.149 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-13T17:46:55.351 INFO:teuthology.run_tasks:Running task internal.base...
2026-04-13T17:46:55.352 INFO:teuthology.task.internal:Creating test directory...
2026-04-13T17:46:55.352 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-04-13T17:46:55.354 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-04-13T17:46:55.370 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-04-13T17:46:55.372 INFO:teuthology.run_tasks:Running task internal.archive...
2026-04-13T17:46:55.373 INFO:teuthology.task.internal:Creating archive directory...
2026-04-13T17:46:55.373 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-04-13T17:46:55.410 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-04-13T17:46:55.430 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-04-13T17:46:55.431 INFO:teuthology.task.internal:Enabling coredump saving...
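Note how the check_ceph_data probe above, `test -z $(ls -A /var/lib/ceph)`, still exits 0 when the directory is missing entirely: the `ls` error goes to stderr and the unquoted substitution collapses to no argument, so `test -z` sees an empty string. A sketch of a stricter variant that distinguishes the two cases (an illustration, not what the task runs):

    # fail when the dir exists and is non-empty; a missing dir is treated as clean
    if [ -d /var/lib/ceph ] && [ -n "$(ls -A /var/lib/ceph)" ]; then
        echo "/var/lib/ceph is not empty" >&2
        exit 1
    fi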
2026-04-13T17:46:55.432 DEBUG:teuthology.orchestra.run.vm00:> test -f /run/.containerenv -o -f /.dockerenv
2026-04-13T17:46:55.482 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-13T17:46:55.482 DEBUG:teuthology.orchestra.run.vm01:> test -f /run/.containerenv -o -f /.dockerenv
2026-04-13T17:46:55.498 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-13T17:46:55.498 DEBUG:teuthology.orchestra.run.vm00:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-04-13T17:46:55.524 DEBUG:teuthology.orchestra.run.vm01:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-04-13T17:46:55.547 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-13T17:46:55.557 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-13T17:46:55.563 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-13T17:46:55.573 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-13T17:46:55.575 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-04-13T17:46:55.576 INFO:teuthology.task.internal:Configuring sudo...
2026-04-13T17:46:55.576 DEBUG:teuthology.orchestra.run.vm00:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-04-13T17:46:55.601 DEBUG:teuthology.orchestra.run.vm01:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-04-13T17:46:55.641 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-04-13T17:46:55.643 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
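The coredump task above points kernel.core_pattern at the archive directory; %t and %p expand to the dump time and PID, so every crash lands as <epoch>.<pid>.core under the job archive. A quick way to confirm the pattern takes effect, as a smoke test of the mechanism rather than anything this job runs:

    # crash a throwaway process and check that a core file appears in the archive dir
    ulimit -c unlimited
    sh -c 'kill -SEGV $$' || true
    ls /home/ubuntu/cephtest/archive/coredump/   # expect something like <epoch>.<pid>.core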
2026-04-13T17:46:55.643 DEBUG:teuthology.orchestra.run.vm00:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-04-13T17:46:55.665 DEBUG:teuthology.orchestra.run.vm01:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-04-13T17:46:55.698 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-13T17:46:55.746 DEBUG:teuthology.orchestra.run.vm00:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-13T17:46:55.801 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:46:55.801 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-04-13T17:46:55.863 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-13T17:46:55.887 DEBUG:teuthology.orchestra.run.vm01:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-13T17:46:55.946 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:46:55.946 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-04-13T17:46:56.008 DEBUG:teuthology.orchestra.run.vm00:> sudo service rsyslog restart
2026-04-13T17:46:56.010 DEBUG:teuthology.orchestra.run.vm01:> sudo service rsyslog restart
2026-04-13T17:46:56.040 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-04-13T17:46:56.079 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-04-13T17:46:56.426 INFO:teuthology.run_tasks:Running task internal.timer...
2026-04-13T17:46:56.427 INFO:teuthology.task.internal:Starting timer...
2026-04-13T17:46:56.427 INFO:teuthology.run_tasks:Running task pcp...
2026-04-13T17:46:56.430 INFO:teuthology.run_tasks:Running task selinux...
2026-04-13T17:46:56.432 DEBUG:teuthology.task:Applying overrides for task selinux: {'allowlist': ['scontext=system_u:system_r:logrotate_t:s0', 'scontext=system_u:system_r:getty_t:s0']}
2026-04-13T17:46:56.432 INFO:teuthology.task.selinux:Excluding vm00: VMs are not yet supported
2026-04-13T17:46:56.432 INFO:teuthology.task.selinux:Excluding vm01: VMs are not yet supported
2026-04-13T17:46:56.432 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-04-13T17:46:56.432 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-04-13T17:46:56.432 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-04-13T17:46:56.432 INFO:teuthology.run_tasks:Running task ansible.cephlab...
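The `sudo dd of=/etc/rsyslog.d/80-cephtest.conf` above writes the monitoring rules from stdin, which this log does not capture. A plausible shape for that drop-in, assuming the usual kernel/misc split implied by the two log files installed just before it; the exact selectors are an assumption:

    # hypothetical reconstruction of 80-cephtest.conf: kernel messages to kern.log,
    # everything else to misc.log, matching the files created above
    sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF'
    kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log
    *.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log
    EOF
    sudo systemctl restart rsyslog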
2026-04-13T17:46:56.433 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'logical_volumes': {'lv_1': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_2': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_3': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_4': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}}, 'timezone': 'UTC', 'volume_groups': {'vg_nvme': {'pvs': '/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde'}}}}
2026-04-13T17:46:56.434 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-04-13T17:46:56.435 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-04-13T17:46:57.041 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-04-13T17:46:57.047 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-04-13T17:46:57.048 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "logical_volumes": {"lv_1": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_2": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_3": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_4": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}}, "timezone": "UTC", "volume_groups": {"vg_nvme": {"pvs": "/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde"}}}' -i /tmp/teuth_ansible_inventory3jr2yx7_ --limit vm00.local,vm01.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-04-13T17:48:27.351 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm00.local'), Remote(name='ubuntu@vm01.local')]
2026-04-13T17:48:27.352 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm00.local'
2026-04-13T17:48:27.352 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm00.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-13T17:48:27.417 DEBUG:teuthology.orchestra.run.vm00:> true
2026-04-13T17:48:27.495 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm00.local'
2026-04-13T17:48:27.495 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm01.local'
2026-04-13T17:48:27.496 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm01.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-13T17:48:27.558 DEBUG:teuthology.orchestra.run.vm01:> true
2026-04-13T17:48:27.644 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm01.local'
2026-04-13T17:48:27.644 INFO:teuthology.run_tasks:Running task clock...
2026-04-13T17:48:27.647 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
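The clock task below steps time with whichever daemon the distro ships: it stops ntp/ntpd/chronyd, forces a step (ntpd -gq, falling back to chronyc makestep), restarts the service, then prints peer status. On these el9 hosts only chronyd exists, and because it is stopped first the makestep below gets "506 Cannot talk to daemon"; the restart plus `chronyc sources` is what actually brings time back in line. A minimal chrony-only sketch that steps the clock while the daemon is up (illustrative, not the task's literal command):

    # step the clock immediately if the offset is large, then list sources
    sudo systemctl start chronyd      # ensure the daemon is running first
    sudo chronyc makestep             # returns 200 OK when it can talk to chronyd
    chronyc sources -v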
2026-04-13T17:48:27.647 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-04-13T17:48:27.647 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-13T17:48:27.649 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-04-13T17:48:27.649 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-13T17:48:27.681 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-04-13T17:48:27.695 INFO:teuthology.orchestra.run.vm00.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-04-13T17:48:27.720 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntp.service: Unit ntp.service not loaded.
2026-04-13T17:48:27.722 INFO:teuthology.orchestra.run.vm00.stderr:sudo: ntpd: command not found
2026-04-13T17:48:27.731 INFO:teuthology.orchestra.run.vm00.stdout:506 Cannot talk to daemon
2026-04-13T17:48:27.737 INFO:teuthology.orchestra.run.vm01.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded.
2026-04-13T17:48:27.745 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-04-13T17:48:27.761 INFO:teuthology.orchestra.run.vm00.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-04-13T17:48:27.764 INFO:teuthology.orchestra.run.vm01.stderr:sudo: ntpd: command not found
2026-04-13T17:48:27.777 INFO:teuthology.orchestra.run.vm01.stdout:506 Cannot talk to daemon
2026-04-13T17:48:27.793 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntp.service: Unit ntp.service not found.
2026-04-13T17:48:27.810 INFO:teuthology.orchestra.run.vm01.stderr:Failed to start ntpd.service: Unit ntpd.service not found.
2026-04-13T17:48:27.812 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found
2026-04-13T17:48:27.865 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm00.stdout:===============================================================================
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm00.stdout:^? mail.light-speed.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm00.stdout:^? s7.vonderste.in 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm00.stdout:^? time.cloudflare.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm00.stdout:^? mail2.light-speed.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm01.stdout:===============================================================================
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm01.stdout:^? mail2.light-speed.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm01.stdout:^? mail.light-speed.de 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm01.stdout:^? time.cloudflare.com 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.966 INFO:teuthology.orchestra.run.vm01.stdout:^? s7.vonderste.in 0 6 0 - +0ns[ +0ns] +/- 0ns
2026-04-13T17:48:27.967 INFO:teuthology.run_tasks:Running task pexec...
2026-04-13T17:48:27.969 INFO:teuthology.task.pexec:Executing custom commands...
2026-04-13T17:48:27.969 DEBUG:teuthology.orchestra.run.vm00:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-04-13T17:48:27.969 DEBUG:teuthology.orchestra.run.vm01:> TESTDIR=/home/ubuntu/cephtest bash -s
2026-04-13T17:48:28.009 DEBUG:teuthology.task.pexec:ubuntu@vm00.local< sudo dnf remove nvme-cli -y
2026-04-13T17:48:28.010 DEBUG:teuthology.task.pexec:ubuntu@vm00.local< sudo dnf install nvmetcli nvme-cli -y
2026-04-13T17:48:28.010 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm00.local
2026-04-13T17:48:28.010 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-04-13T17:48:28.010 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-04-13T17:48:28.010 DEBUG:teuthology.task.pexec:ubuntu@vm01.local< sudo dnf remove nvme-cli -y
2026-04-13T17:48:28.010 DEBUG:teuthology.task.pexec:ubuntu@vm01.local< sudo dnf install nvmetcli nvme-cli -y
2026-04-13T17:48:28.010 INFO:teuthology.task.pexec:Running commands on host ubuntu@vm01.local
2026-04-13T17:48:28.010 INFO:teuthology.task.pexec:sudo dnf remove nvme-cli -y
2026-04-13T17:48:28.010 INFO:teuthology.task.pexec:sudo dnf install nvmetcli nvme-cli -y
2026-04-13T17:48:28.203 INFO:teuthology.orchestra.run.vm00.stdout:No match for argument: nvme-cli
2026-04-13T17:48:28.204 INFO:teuthology.orchestra.run.vm00.stderr:No packages marked for removal.
2026-04-13T17:48:28.204 INFO:teuthology.orchestra.run.vm01.stdout:No match for argument: nvme-cli
2026-04-13T17:48:28.204 INFO:teuthology.orchestra.run.vm01.stderr:No packages marked for removal.
2026-04-13T17:48:28.206 INFO:teuthology.orchestra.run.vm00.stdout:Dependencies resolved.
2026-04-13T17:48:28.207 INFO:teuthology.orchestra.run.vm00.stdout:Nothing to do.
2026-04-13T17:48:28.207 INFO:teuthology.orchestra.run.vm00.stdout:Complete!
2026-04-13T17:48:28.207 INFO:teuthology.orchestra.run.vm01.stdout:Dependencies resolved.
2026-04-13T17:48:28.208 INFO:teuthology.orchestra.run.vm01.stdout:Nothing to do.
2026-04-13T17:48:28.209 INFO:teuthology.orchestra.run.vm01.stdout:Complete!
2026-04-13T17:48:28.615 INFO:teuthology.orchestra.run.vm00.stdout:Last metadata expiration check: 0:00:57 ago on Mon 13 Apr 2026 05:47:31 PM UTC.
2026-04-13T17:48:28.616 INFO:teuthology.orchestra.run.vm01.stdout:Last metadata expiration check: 0:01:00 ago on Mon 13 Apr 2026 05:47:28 PM UTC.
2026-04-13T17:48:28.720 INFO:teuthology.orchestra.run.vm01.stdout:Dependencies resolved.
2026-04-13T17:48:28.720 INFO:teuthology.orchestra.run.vm00.stdout:Dependencies resolved.
2026-04-13T17:48:28.720 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================
2026-04-13T17:48:28.720 INFO:teuthology.orchestra.run.vm01.stdout: Package Architecture Version Repository Size
2026-04-13T17:48:28.720 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Installing:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Installing dependencies:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Transaction Summary
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:================================================================================
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Install 6 Packages
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Total download size: 2.3 M
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Installed size: 11 M
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm01.stdout:Downloading Packages:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:================================================================================
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: Package Architecture Version Repository Size
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:================================================================================
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Installing:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: nvme-cli x86_64 2.16-1.el9 baseos 1.2 M
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: nvmetcli noarch 0.8-3.el9 baseos 44 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Installing dependencies:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: python3-configshell noarch 1:1.1.30-1.el9 baseos 72 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: python3-kmod x86_64 0.9-32.el9 baseos 84 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: python3-pyparsing noarch 2.4.7-9.el9 baseos 150 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout: python3-urwid x86_64 2.1.2-4.el9 baseos 837 k
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Transaction Summary
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:================================================================================
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Install 6 Packages
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Total download size: 2.3 M
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Installed size: 11 M
2026-04-13T17:48:28.721 INFO:teuthology.orchestra.run.vm00.stdout:Downloading Packages:
2026-04-13T17:48:29.189 INFO:teuthology.orchestra.run.vm01.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 135 kB/s | 44 kB 00:00
2026-04-13T17:48:29.204 INFO:teuthology.orchestra.run.vm01.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 212 kB/s | 72 kB 00:00
2026-04-13T17:48:29.218 INFO:teuthology.orchestra.run.vm00.stdout:(1/6): nvmetcli-0.8-3.el9.noarch.rpm 124 kB/s | 44 kB 00:00
2026-04-13T17:48:29.256 INFO:teuthology.orchestra.run.vm00.stdout:(2/6): python3-configshell-1.1.30-1.el9.noarch. 184 kB/s | 72 kB 00:00
2026-04-13T17:48:29.365 INFO:teuthology.orchestra.run.vm01.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 481 kB/s | 84 kB 00:00
2026-04-13T17:48:29.396 INFO:teuthology.orchestra.run.vm01.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 783 kB/s | 150 kB 00:00
2026-04-13T17:48:29.407 INFO:teuthology.orchestra.run.vm00.stdout:(3/6): python3-kmod-0.9-32.el9.x86_64.rpm 446 kB/s | 84 kB 00:00
2026-04-13T17:48:29.421 INFO:teuthology.orchestra.run.vm00.stdout:(4/6): python3-pyparsing-2.4.7-9.el9.noarch.rpm 914 kB/s | 150 kB 00:00
2026-04-13T17:48:29.494 INFO:teuthology.orchestra.run.vm00.stdout:(5/6): nvme-cli-2.16-1.el9.x86_64.rpm 1.8 MB/s | 1.2 MB 00:00
2026-04-13T17:48:29.696 INFO:teuthology.orchestra.run.vm01.stdout:(5/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 2.5 MB/s | 837 kB 00:00
2026-04-13T17:48:29.743 INFO:teuthology.orchestra.run.vm01.stdout:(6/6): nvme-cli-2.16-1.el9.x86_64.rpm 1.3 MB/s | 1.2 MB 00:00
2026-04-13T17:48:29.743 INFO:teuthology.orchestra.run.vm01.stdout:--------------------------------------------------------------------------------
2026-04-13T17:48:29.743 INFO:teuthology.orchestra.run.vm01.stdout:Total 2.3 MB/s | 2.3 MB 00:01
2026-04-13T17:48:29.747 INFO:teuthology.orchestra.run.vm00.stdout:(6/6): python3-urwid-2.1.2-4.el9.x86_64.rpm 2.4 MB/s | 837 kB 00:00
2026-04-13T17:48:29.750 INFO:teuthology.orchestra.run.vm00.stdout:--------------------------------------------------------------------------------
2026-04-13T17:48:29.750 INFO:teuthology.orchestra.run.vm00.stdout:Total 2.2 MB/s | 2.3 MB 00:01
2026-04-13T17:48:29.817 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction check
2026-04-13T17:48:29.817 INFO:teuthology.orchestra.run.vm00.stdout:Running transaction check
2026-04-13T17:48:29.824 INFO:teuthology.orchestra.run.vm01.stdout:Transaction check succeeded.
2026-04-13T17:48:29.824 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction test
2026-04-13T17:48:29.826 INFO:teuthology.orchestra.run.vm00.stdout:Transaction check succeeded.
2026-04-13T17:48:29.826 INFO:teuthology.orchestra.run.vm00.stdout:Running transaction test
2026-04-13T17:48:29.881 INFO:teuthology.orchestra.run.vm01.stdout:Transaction test succeeded.
2026-04-13T17:48:29.881 INFO:teuthology.orchestra.run.vm01.stdout:Running transaction
2026-04-13T17:48:29.883 INFO:teuthology.orchestra.run.vm00.stdout:Transaction test succeeded.
2026-04-13T17:48:29.883 INFO:teuthology.orchestra.run.vm00.stdout:Running transaction
2026-04-13T17:48:30.042 INFO:teuthology.orchestra.run.vm00.stdout: Preparing : 1/1
2026-04-13T17:48:30.052 INFO:teuthology.orchestra.run.vm00.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-04-13T17:48:30.055 INFO:teuthology.orchestra.run.vm01.stdout: Preparing : 1/1
2026-04-13T17:48:30.067 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-urwid-2.1.2-4.el9.x86_64 1/6
2026-04-13T17:48:30.067 INFO:teuthology.orchestra.run.vm00.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-04-13T17:48:30.077 INFO:teuthology.orchestra.run.vm00.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-04-13T17:48:30.079 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-pyparsing-2.4.7-9.el9.noarch 2/6
2026-04-13T17:48:30.087 INFO:teuthology.orchestra.run.vm00.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-04-13T17:48:30.089 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-04-13T17:48:30.089 INFO:teuthology.orchestra.run.vm00.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-04-13T17:48:30.099 INFO:teuthology.orchestra.run.vm01.stdout: Installing : python3-kmod-0.9-32.el9.x86_64 4/6
2026-04-13T17:48:30.101 INFO:teuthology.orchestra.run.vm01.stdout: Installing : nvmetcli-0.8-3.el9.noarch 5/6
2026-04-13T17:48:30.247 INFO:teuthology.orchestra.run.vm00.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-04-13T17:48:30.252 INFO:teuthology.orchestra.run.vm00.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-04-13T17:48:30.264 INFO:teuthology.orchestra.run.vm01.stdout: Running scriptlet: nvmetcli-0.8-3.el9.noarch 5/6
2026-04-13T17:48:30.269 INFO:teuthology.orchestra.run.vm01.stdout: Installing : nvme-cli-2.16-1.el9.x86_64 6/6
2026-04-13T17:48:30.583 INFO:teuthology.orchestra.run.vm01.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-04-13T17:48:30.583 INFO:teuthology.orchestra.run.vm01.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-04-13T17:48:30.583 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:48:30.618 INFO:teuthology.orchestra.run.vm00.stdout: Running scriptlet: nvme-cli-2.16-1.el9.x86_64 6/6
2026-04-13T17:48:30.618 INFO:teuthology.orchestra.run.vm00.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /usr/lib/systemd/system/nvmefc-boot-connections.service.
2026-04-13T17:48:30.618 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:48:30.972 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-04-13T17:48:30.972 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-04-13T17:48:30.972 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-04-13T17:48:30.972 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-04-13T17:48:30.972 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-04-13T17:48:30.983 INFO:teuthology.orchestra.run.vm00.stdout: Verifying : nvme-cli-2.16-1.el9.x86_64 1/6
2026-04-13T17:48:30.983 INFO:teuthology.orchestra.run.vm00.stdout: Verifying : nvmetcli-0.8-3.el9.noarch 2/6
2026-04-13T17:48:30.983 INFO:teuthology.orchestra.run.vm00.stdout: Verifying : python3-configshell-1:1.1.30-1.el9.noarch 3/6
2026-04-13T17:48:30.984 INFO:teuthology.orchestra.run.vm00.stdout: Verifying : python3-kmod-0.9-32.el9.x86_64 4/6
2026-04-13T17:48:30.984 INFO:teuthology.orchestra.run.vm00.stdout: Verifying : python3-pyparsing-2.4.7-9.el9.noarch 5/6
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout:Installed:
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:48:31.035 INFO:teuthology.orchestra.run.vm01.stdout:Complete!
2026-04-13T17:48:31.041 INFO:teuthology.orchestra.run.vm00.stdout: Verifying : python3-urwid-2.1.2-4.el9.x86_64 6/6
2026-04-13T17:48:31.041 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:48:31.041 INFO:teuthology.orchestra.run.vm00.stdout:Installed:
2026-04-13T17:48:31.042 INFO:teuthology.orchestra.run.vm00.stdout: nvme-cli-2.16-1.el9.x86_64 nvmetcli-0.8-3.el9.noarch
2026-04-13T17:48:31.042 INFO:teuthology.orchestra.run.vm00.stdout: python3-configshell-1:1.1.30-1.el9.noarch python3-kmod-0.9-32.el9.x86_64
2026-04-13T17:48:31.042 INFO:teuthology.orchestra.run.vm00.stdout: python3-pyparsing-2.4.7-9.el9.noarch python3-urwid-2.1.2-4.el9.x86_64
2026-04-13T17:48:31.042 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:48:31.042 INFO:teuthology.orchestra.run.vm00.stdout:Complete!
2026-04-13T17:48:31.080 DEBUG:teuthology.parallel:result is None
2026-04-13T17:48:31.091 DEBUG:teuthology.parallel:result is None
2026-04-13T17:48:31.092 INFO:teuthology.run_tasks:Running task nvme_loop...
2026-04-13T17:48:31.095 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices...
2026-04-13T17:48:31.095 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:48:31.095 DEBUG:teuthology.orchestra.run.vm00:> dd if=/scratch_devs of=/dev/stdout
2026-04-13T17:48:31.116 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4']
2026-04-13T17:48:31.117 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vg_nvme/lv_1
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vg_nvme/lv_1 -> ../dm-0
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 967 Links: 1
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:48:30.869138782 +0000
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:30.716138459 +0000
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:30.717138461 +0000
2026-04-13T17:48:31.185 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:30.716138459 +0000
2026-04-13T17:48:31.185 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1
2026-04-13T17:48:31.258 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in
2026-04-13T17:48:31.259 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out
2026-04-13T17:48:31.259 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000136224 s, 3.8 MB/s
2026-04-13T17:48:31.259 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1
2026-04-13T17:48:31.320 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vg_nvme/lv_2
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vg_nvme/lv_2 -> ../dm-1
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 962 Links: 1
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:48:30.870138784 +0000
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:30.716138459 +0000
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:30.716138459 +0000
2026-04-13T17:48:31.382 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:30.716138459 +0000
2026-04-13T17:48:31.383 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1
2026-04-13T17:48:31.449 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in
2026-04-13T17:48:31.449 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out
2026-04-13T17:48:31.449 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000150673 s, 3.4 MB/s
2026-04-13T17:48:31.449 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2
2026-04-13T17:48:31.512 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vg_nvme/lv_3
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vg_nvme/lv_3 -> ../dm-2
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 975 Links: 1
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:48:30.870138784 +0000
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:30.718138463 +0000
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:30.718138463 +0000
2026-04-13T17:48:31.575 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:30.718138463 +0000
2026-04-13T17:48:31.575 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1
2026-04-13T17:48:31.644 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in
2026-04-13T17:48:31.644 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out
2026-04-13T17:48:31.644 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000174497 s, 2.9 MB/s
2026-04-13T17:48:31.645 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3
2026-04-13T17:48:31.710 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/vg_nvme/lv_4
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/vg_nvme/lv_4 -> ../dm-3
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 955 Links: 1
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:48:30.870138784 +0000
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:30.711138448 +0000
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:30.711138448 +0000
2026-04-13T17:48:31.768 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:30.711138448 +0000
2026-04-13T17:48:31.768 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1
2026-04-13T17:48:31.837 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in
2026-04-13T17:48:31.837 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out
2026-04-13T17:48:31.837 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000150862 s, 3.4 MB/s
2026-04-13T17:48:31.838 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4
2026-04-13T17:48:31.898 DEBUG:teuthology.orchestra.run.vm00:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
2026-04-13T17:48:32.028 INFO:teuthology.orchestra.run.vm00.stdout:loop
2026-04-13T17:48:32.029 INFO:tasks.nvme_loop:Connecting nvme_loop vm00:/dev/vg_nvme/lv_1...
2026-04-13T17:48:32.029 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn
2026-04-13T17:48:32.064 INFO:teuthology.orchestra.run.vm00.stdout:1
2026-04-13T17:48:32.095 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vg_nvme/lv_11
2026-04-13T17:48:32.121 INFO:teuthology.orchestra.run.vm00.stdout:connecting to device: nvme0
2026-04-13T17:48:32.124 INFO:tasks.nvme_loop:Connecting nvme_loop vm00:/dev/vg_nvme/lv_2...
2026-04-13T17:48:32.124 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn
2026-04-13T17:48:32.202 INFO:teuthology.orchestra.run.vm00.stdout:1
2026-04-13T17:48:32.235 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vg_nvme/lv_21
2026-04-13T17:48:32.266 INFO:teuthology.orchestra.run.vm00.stdout:connecting to device: nvme1
2026-04-13T17:48:32.268 INFO:tasks.nvme_loop:Connecting nvme_loop vm00:/dev/vg_nvme/lv_3...
2026-04-13T17:48:32.268 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn
2026-04-13T17:48:32.349 INFO:teuthology.orchestra.run.vm00.stdout:1
2026-04-13T17:48:32.386 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vg_nvme/lv_31
2026-04-13T17:48:32.417 INFO:teuthology.orchestra.run.vm00.stdout:connecting to device: nvme2
2026-04-13T17:48:32.418 INFO:tasks.nvme_loop:Connecting nvme_loop vm00:/dev/vg_nvme/lv_4...
2026-04-13T17:48:32.419 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn
2026-04-13T17:48:32.496 INFO:teuthology.orchestra.run.vm00.stdout:1
2026-04-13T17:48:32.527 INFO:teuthology.orchestra.run.vm00.stdout:/dev/vg_nvme/lv_41
2026-04-13T17:48:32.557 INFO:teuthology.orchestra.run.vm00.stdout:connecting to device: nvme3
2026-04-13T17:48:32.560 DEBUG:teuthology.orchestra.run.vm00:> lsblk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:sr0 11:0 1 366K 0 rom
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:vda 252:0 0 40G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:└─vda1 252:1 0 40G 0 part /
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:vdb 252:16 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:vdc 252:32 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:vdd 252:48 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:vde 252:64 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:nvme0n1 259:1 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:nvme1n1 259:2 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:nvme2n1 259:4 0 20G 0 disk
2026-04-13T17:48:32.618 INFO:teuthology.orchestra.run.vm00.stdout:nvme3n1 259:6 0 20G 0 disk
2026-04-13T17:48:32.619 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme list -o json
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "Devices":[
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: {
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "NameSpace":1,
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "DevicePath":"/dev/nvme0n1",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "GenericPath":"/dev/ng0n1",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "SerialNumber":"8b2c1f5b0b2f0e200b07",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "SectorSize":512
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: {
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "NameSpace":1,
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "DevicePath":"/dev/nvme1n1",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "GenericPath":"/dev/ng1n1",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:32.691 INFO:teuthology.orchestra.run.vm00.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "SerialNumber":"3341fccde0b3848ce18b",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "SectorSize":512
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: {
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "NameSpace":1,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "DevicePath":"/dev/nvme2n1",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "GenericPath":"/dev/ng2n1",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "SerialNumber":"26e9e0aced76c789d80f",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "SectorSize":512
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: },
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: {
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "NameSpace":1,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "DevicePath":"/dev/nvme3n1",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "GenericPath":"/dev/ng3n1",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "SerialNumber":"c2bf6a0108b89cd03010",
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: "SectorSize":512
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: }
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout: ]
2026-04-13T17:48:32.692 INFO:teuthology.orchestra.run.vm00.stdout:}
DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-04-13T17:48:32.771 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:32.771 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:32.771 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00631714 s, 648 kB/s 2026-04-13T17:48:32.772 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s0 /dev/nvme0n1 2026-04-13T17:48:32.839 INFO:teuthology.orchestra.run.vm00.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:32.839 INFO:teuthology.orchestra.run.vm00.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:32.839 INFO:teuthology.orchestra.run.vm00.stdout:00000016 2026-04-13T17:48:32.840 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-04-13T17:48:32.914 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:32.914 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:32.914 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00769474 s, 532 kB/s 2026-04-13T17:48:32.919 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-04-13T17:48:32.985 INFO:teuthology.orchestra.run.vm00.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:32.985 INFO:teuthology.orchestra.run.vm00.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:32.985 INFO:teuthology.orchestra.run.vm00.stdout:40000016 2026-04-13T17:48:32.986 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-04-13T17:48:33.057 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.057 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:33.057 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00484618 s, 845 kB/s 2026-04-13T17:48:33.058 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-04-13T17:48:33.126 INFO:teuthology.orchestra.run.vm00.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:33.127 INFO:teuthology.orchestra.run.vm00.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:33.127 INFO:teuthology.orchestra.run.vm00.stdout:280000016 2026-04-13T17:48:33.127 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-04-13T17:48:33.194 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.194 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:33.194 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00360574 s, 1.1 MB/s 2026-04-13T17:48:33.195 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-04-13T17:48:33.258 INFO:teuthology.orchestra.run.vm00.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:33.258 INFO:teuthology.orchestra.run.vm00.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:33.258 INFO:teuthology.orchestra.run.vm00.stdout:00000016 2026-04-13T17:48:33.259 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-04-13T17:48:33.328 
INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.328 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:33.328 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00357043 s, 1.1 MB/s 2026-04-13T17:48:33.329 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-13T17:48:33.392 INFO:teuthology.orchestra.run.vm00.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:33.392 INFO:teuthology.orchestra.run.vm00.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:33.392 INFO:teuthology.orchestra.run.vm00.stdout:40000016 2026-04-13T17:48:33.393 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-13T17:48:33.462 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.462 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:33.462 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00392988 s, 1.0 MB/s 2026-04-13T17:48:33.463 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-13T17:48:33.529 INFO:teuthology.orchestra.run.vm00.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:33.529 INFO:teuthology.orchestra.run.vm00.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:33.529 INFO:teuthology.orchestra.run.vm00.stdout:280000016 2026-04-13T17:48:33.531 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-13T17:48:33.603 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.603 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:33.603 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00500013 s, 819 kB/s 2026-04-13T17:48:33.605 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-13T17:48:33.672 INFO:teuthology.orchestra.run.vm00.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:33.672 INFO:teuthology.orchestra.run.vm00.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:33.672 INFO:teuthology.orchestra.run.vm00.stdout:00000016 2026-04-13T17:48:33.673 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-13T17:48:33.743 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.743 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 2026-04-13T17:48:33.743 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00358783 s, 1.1 MB/s 2026-04-13T17:48:33.744 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-13T17:48:33.812 INFO:teuthology.orchestra.run.vm00.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-13T17:48:33.812 INFO:teuthology.orchestra.run.vm00.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-13T17:48:33.813 INFO:teuthology.orchestra.run.vm00.stdout:40000016 2026-04-13T17:48:33.813 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-13T17:48:33.885 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in 2026-04-13T17:48:33.885 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out 
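[annotation] The configfs one-liner at the top of this block is the entire nvmet loopback export: create a subsystem, allow any host NQN, point namespace 1 at the logical volume, enable it, publish the subsystem on loop port 1, and connect. A minimal standalone sketch of the same sequence, assuming configfs is mounted, nvme_loop is loaded, and port 1 exists (the port/host setup itself is logged later for vm01); the helper name and its arguments are illustrative, not part of the task:

    # export_loop_dev <subsystem-nqn> <block-device>: mirrors the logged command chain
    export_loop_dev() {
      local nqn=$1 dev=$2
      local sub=/sys/kernel/config/nvmet/subsystems/$nqn
      sudo mkdir -p "$sub"
      echo 1 | sudo tee "$sub/attr_allow_any_host"               # skip host NQN whitelisting
      sudo mkdir -p "$sub/namespaces/1"
      echo -n "$dev" | sudo tee "$sub/namespaces/1/device_path"  # back namespace 1 with the LV
      echo 1 | sudo tee "$sub/namespaces/1/enable"
      sudo ln -s "$sub" "/sys/kernel/config/nvmet/ports/1/subsystems/$nqn"  # publish on loop port 1
      sudo nvme connect -t loop -n "$nqn" -q hostnqn             # attach as a new /dev/nvmeXn1
    }
    export_loop_dev lv_4 /dev/vg_nvme/lv_4

The run-together stdout line "/dev/vg_nvme/lv_41" is expected: echo -n feeds tee no trailing newline, so the device_path echo and the following "1" from the enable write land on one captured line.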
2026-04-13T17:48:33.885 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00500063 s, 819 kB/s
2026-04-13T17:48:33.886 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1
2026-04-13T17:48:33.956 INFO:teuthology.orchestra.run.vm00.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:33.956 INFO:teuthology.orchestra.run.vm00.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:33.956 INFO:teuthology.orchestra.run.vm00.stdout:280000016
2026-04-13T17:48:33.957 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096
2026-04-13T17:48:34.026 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in
2026-04-13T17:48:34.026 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out
2026-04-13T17:48:34.026 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00421982 s, 971 kB/s
2026-04-13T17:48:34.027 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s0 /dev/nvme3n1
2026-04-13T17:48:34.094 INFO:teuthology.orchestra.run.vm00.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:34.094 INFO:teuthology.orchestra.run.vm00.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:34.094 INFO:teuthology.orchestra.run.vm00.stdout:00000016
2026-04-13T17:48:34.095 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096
2026-04-13T17:48:34.160 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in
2026-04-13T17:48:34.161 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out
2026-04-13T17:48:34.161 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00370704 s, 1.1 MB/s
2026-04-13T17:48:34.164 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1
2026-04-13T17:48:34.226 INFO:teuthology.orchestra.run.vm00.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:34.226 INFO:teuthology.orchestra.run.vm00.stdout:40000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:34.226 INFO:teuthology.orchestra.run.vm00.stdout:40000016
2026-04-13T17:48:34.227 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096
2026-04-13T17:48:34.295 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records in
2026-04-13T17:48:34.296 INFO:teuthology.orchestra.run.vm00.stderr:4096+0 records out
2026-04-13T17:48:34.296 INFO:teuthology.orchestra.run.vm00.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00415078 s, 987 kB/s
2026-04-13T17:48:34.299 DEBUG:teuthology.orchestra.run.vm00:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1
2026-04-13T17:48:34.365 INFO:teuthology.orchestra.run.vm00.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:34.365 INFO:teuthology.orchestra.run.vm00.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:34.365 INFO:teuthology.orchestra.run.vm00.stdout:280000016
2026-04-13T17:48:34.366 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1']
2026-04-13T17:48:34.366 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:48:34.366 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/scratch_devs
2026-04-13T17:48:34.436 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:48:34.436 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout
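[annotation] Each fresh namespace is scrubbed and spot-checked at three offsets (0, 1 GiB, 10 GiB): write 4 KiB of zeros with dd, then read the first 22 bytes back at the same offset with hexdump and confirm they are all zero. The same loop as a standalone sketch; DEV and the offset list mirror the log and are placeholders:

    DEV=/dev/nvme0n1                           # placeholder device
    for off in 0 1073741824 10737418240; do    # 0, 1 GiB, 10 GiB
      sudo dd if=/dev/zero of="$DEV" seek="$off" bs=1 count=4096
      sudo hexdump -n22 -C -s"$off" "$DEV"     # expect all-zero bytes back
    done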
2026-04-13T17:48:34.458 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4']
2026-04-13T17:48:34.458 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_1
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_1 -> ../dm-0
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 947 Links: 1
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:48:30.855947019 +0000
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:30.674946845 +0000
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:30.674946845 +0000
2026-04-13T17:48:34.516 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:30.674946845 +0000
2026-04-13T17:48:34.516 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1
2026-04-13T17:48:34.584 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-13T17:48:34.584 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-13T17:48:34.584 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.00014447 s, 3.5 MB/s
2026-04-13T17:48:34.586 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1
2026-04-13T17:48:34.644 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_2
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_2 -> ../dm-1
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 961 Links: 1
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:48:30.855947019 +0000
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:30.678946849 +0000
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:30.678946849 +0000
2026-04-13T17:48:34.701 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:30.678946849 +0000
2026-04-13T17:48:34.701 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1
2026-04-13T17:48:34.764 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-13T17:48:34.764 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-13T17:48:34.764 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000169367 s, 3.0 MB/s
2026-04-13T17:48:34.765 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2
2026-04-13T17:48:34.820 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_3
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_3 -> ../dm-2
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 968 Links: 1
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:48:30.856947020 +0000
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:30.682946853 +0000
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:30.682946853 +0000
2026-04-13T17:48:34.877 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:30.682946853 +0000
2026-04-13T17:48:34.877 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1
2026-04-13T17:48:34.941 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-13T17:48:34.941 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-13T17:48:34.941 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000140122 s, 3.7 MB/s
2026-04-13T17:48:34.942 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3
2026-04-13T17:48:34.999 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/vg_nvme/lv_4
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/vg_nvme/lv_4 -> ../dm-3
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 957 Links: 1
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root)
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:device_t:s0
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:48:30.856947020 +0000
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:30.677946848 +0000
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:30.677946848 +0000
2026-04-13T17:48:35.056 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:30.677946848 +0000
2026-04-13T17:48:35.057 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1
2026-04-13T17:48:35.121 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in
2026-04-13T17:48:35.122 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out
2026-04-13T17:48:35.122 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000225582 s, 2.3 MB/s
2026-04-13T17:48:35.123 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4
2026-04-13T17:48:35.180 DEBUG:teuthology.orchestra.run.vm01:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype
2026-04-13T17:48:35.312 INFO:teuthology.orchestra.run.vm01.stdout:loop
2026-04-13T17:48:35.313 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_1...
2026-04-13T17:48:35.313 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn
2026-04-13T17:48:35.343 INFO:teuthology.orchestra.run.vm01.stdout:1
2026-04-13T17:48:35.372 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_11
2026-04-13T17:48:35.403 INFO:teuthology.orchestra.run.vm01.stdout:connecting to device: nvme0
2026-04-13T17:48:35.403 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_2...
2026-04-13T17:48:35.403 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn
2026-04-13T17:48:35.477 INFO:teuthology.orchestra.run.vm01.stdout:1
2026-04-13T17:48:35.505 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_21
2026-04-13T17:48:35.531 INFO:teuthology.orchestra.run.vm01.stdout:connecting to device: nvme1
2026-04-13T17:48:35.532 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_3...
2026-04-13T17:48:35.532 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn
2026-04-13T17:48:35.606 INFO:teuthology.orchestra.run.vm01.stdout:1
2026-04-13T17:48:35.636 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_31
2026-04-13T17:48:35.665 INFO:teuthology.orchestra.run.vm01.stdout:connecting to device: nvme2
2026-04-13T17:48:35.666 INFO:tasks.nvme_loop:Connecting nvme_loop vm01:/dev/vg_nvme/lv_4...
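[annotation] Before exporting each LV above, the task runs three preconditions per device: stat (the symlink to the dm device must exist), a one-sector dd read (the device must be readable), and a negated mount grep (nothing may have it mounted; devtmpfs lines are filtered out because every block device appears there). The same checks as a standalone sketch, with the LV list hard-coded for illustration:

    for lv in /dev/vg_nvme/lv_1 /dev/vg_nvme/lv_2 /dev/vg_nvme/lv_3 /dev/vg_nvme/lv_4; do
      stat "$lv"                                    # symlink to ../dm-N must exist
      sudo dd if="$lv" of=/dev/null count=1         # one 512-byte read proves readability
      ! mount | grep -v devtmpfs | grep -q "$lv"    # fail if anything has it mounted
    done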
2026-04-13T17:48:35.666 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn
2026-04-13T17:48:35.741 INFO:teuthology.orchestra.run.vm01.stdout:1
2026-04-13T17:48:35.772 INFO:teuthology.orchestra.run.vm01.stdout:/dev/vg_nvme/lv_41
2026-04-13T17:48:35.801 INFO:teuthology.orchestra.run.vm01.stdout:connecting to device: nvme3
2026-04-13T17:48:35.807 DEBUG:teuthology.orchestra.run.vm01:> lsblk
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:sr0 11:0 1 366K 0 rom
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:vda 252:0 0 40G 0 disk
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:└─vda1 252:1 0 40G 0 part /
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:vdb 252:16 0 20G 0 disk
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:vdc 252:32 0 20G 0 disk
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:vdd 252:48 0 20G 0 disk
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:vde 252:64 0 20G 0 disk
2026-04-13T17:48:35.866 INFO:teuthology.orchestra.run.vm01.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm
2026-04-13T17:48:35.867 INFO:teuthology.orchestra.run.vm01.stdout:nvme0n1 259:0 0 20G 0 disk
2026-04-13T17:48:35.867 INFO:teuthology.orchestra.run.vm01.stdout:nvme1n1 259:2 0 20G 0 disk
2026-04-13T17:48:35.867 INFO:teuthology.orchestra.run.vm01.stdout:nvme2n1 259:4 0 20G 0 disk
2026-04-13T17:48:35.867 INFO:teuthology.orchestra.run.vm01.stdout:nvme3n1 259:6 0 20G 0 disk
2026-04-13T17:48:35.867 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme list -o json
2026-04-13T17:48:35.932 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "Devices":[
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: {
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace":1,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath":"/dev/nvme0n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "GenericPath":"/dev/ng0n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber":"9e712741119b9c9a8422",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize":512
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: {
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace":1,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath":"/dev/nvme1n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "GenericPath":"/dev/ng1n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber":"707733fc50b1c23686f1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize":512
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: {
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace":1,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath":"/dev/nvme2n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "GenericPath":"/dev/ng2n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber":"3680d36b7ca865845e0b",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize":512
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: },
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: {
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "NameSpace":1,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "DevicePath":"/dev/nvme3n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "GenericPath":"/dev/ng3n1",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "Firmware":"5.14.0-6",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "ModelNumber":"Linux",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SerialNumber":"1c49275885a38132c982",
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "UsedBytes":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "MaximumLBA":41934848,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "PhysicalSize":21470642176,
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: "SectorSize":512
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: }
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout: ]
2026-04-13T17:48:35.933 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-04-13T17:48:35.934 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096
2026-04-13T17:48:36.000 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.000 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.000 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00393978 s, 1.0 MB/s
2026-04-13T17:48:36.002 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme0n1
2026-04-13T17:48:36.066 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.066 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.067 INFO:teuthology.orchestra.run.vm01.stdout:00000016
2026-04-13T17:48:36.068 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096
2026-04-13T17:48:36.136 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.136 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.136 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00356946 s, 1.1 MB/s
2026-04-13T17:48:36.140 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1
2026-04-13T17:48:36.202 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.202 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.202 INFO:teuthology.orchestra.run.vm01.stdout:40000016
2026-04-13T17:48:36.203 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096
2026-04-13T17:48:36.272 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.273 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.273 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00542551 s, 755 kB/s
2026-04-13T17:48:36.274 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1
2026-04-13T17:48:36.337 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.337 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.337 INFO:teuthology.orchestra.run.vm01.stdout:280000016
2026-04-13T17:48:36.338 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096
2026-04-13T17:48:36.405 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.405 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.405 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00346877 s, 1.2 MB/s
2026-04-13T17:48:36.406 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme1n1
2026-04-13T17:48:36.471 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.471 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.471 INFO:teuthology.orchestra.run.vm01.stdout:00000016
2026-04-13T17:48:36.472 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096
2026-04-13T17:48:36.538 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.538 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.538 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00440344 s, 930 kB/s
2026-04-13T17:48:36.540 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1
2026-04-13T17:48:36.603 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.603 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.603 INFO:teuthology.orchestra.run.vm01.stdout:40000016
2026-04-13T17:48:36.604 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096
2026-04-13T17:48:36.671 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.671 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.671 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00441972 s, 927 kB/s
2026-04-13T17:48:36.672 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1
2026-04-13T17:48:36.734 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.734 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.734 INFO:teuthology.orchestra.run.vm01.stdout:280000016
2026-04-13T17:48:36.735 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096
2026-04-13T17:48:36.799 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.799 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.800 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00335763 s, 1.2 MB/s
2026-04-13T17:48:36.800 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme2n1
2026-04-13T17:48:36.863 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:36.863 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:36.863 INFO:teuthology.orchestra.run.vm01.stdout:00000016
2026-04-13T17:48:36.864 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096
2026-04-13T17:48:36.934 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:36.934 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:36.934 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00474906 s, 862 kB/s
2026-04-13T17:48:36.936 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1
2026-04-13T17:48:37.003 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:37.003 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:37.003 INFO:teuthology.orchestra.run.vm01.stdout:40000016
2026-04-13T17:48:37.004 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096
2026-04-13T17:48:37.076 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:37.076 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
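[annotation] The lsblk plus nvme list -o json pair above is how the task discovers which /dev/nvmeXn1 nodes the loop connects produced. If you wanted to pull the device paths out of that JSON by hand, jq does it in one line; this assumes jq is installed (the task itself parses the JSON in Python, not with jq):

    sudo nvme list -o json | jq -r '.Devices[].DevicePath'
    # /dev/nvme0n1
    # /dev/nvme1n1
    # /dev/nvme2n1
    # /dev/nvme3n1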
2026-04-13T17:48:37.076 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00482954 s, 848 kB/s
2026-04-13T17:48:37.078 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1
2026-04-13T17:48:37.142 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:37.142 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:37.142 INFO:teuthology.orchestra.run.vm01.stdout:280000016
2026-04-13T17:48:37.144 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096
2026-04-13T17:48:37.215 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:37.215 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:37.215 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0035075 s, 1.2 MB/s
2026-04-13T17:48:37.216 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s0 /dev/nvme3n1
2026-04-13T17:48:37.282 INFO:teuthology.orchestra.run.vm01.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:37.282 INFO:teuthology.orchestra.run.vm01.stdout:00000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:37.282 INFO:teuthology.orchestra.run.vm01.stdout:00000016
2026-04-13T17:48:37.283 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096
2026-04-13T17:48:37.353 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:37.353 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:37.353 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00393567 s, 1.0 MB/s
2026-04-13T17:48:37.354 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1
2026-04-13T17:48:37.420 INFO:teuthology.orchestra.run.vm01.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:37.420 INFO:teuthology.orchestra.run.vm01.stdout:40000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:37.420 INFO:teuthology.orchestra.run.vm01.stdout:40000016
2026-04-13T17:48:37.422 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096
2026-04-13T17:48:37.494 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records in
2026-04-13T17:48:37.494 INFO:teuthology.orchestra.run.vm01.stderr:4096+0 records out
2026-04-13T17:48:37.494 INFO:teuthology.orchestra.run.vm01.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00421635 s, 971 kB/s
2026-04-13T17:48:37.495 DEBUG:teuthology.orchestra.run.vm01:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1
2026-04-13T17:48:37.565 INFO:teuthology.orchestra.run.vm01.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
2026-04-13T17:48:37.565 INFO:teuthology.orchestra.run.vm01.stdout:280000010 00 00 00 00 00 00 |......|
2026-04-13T17:48:37.565 INFO:teuthology.orchestra.run.vm01.stdout:280000016
2026-04-13T17:48:37.566 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1']
2026-04-13T17:48:37.567 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:48:37.567 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/scratch_devs
2026-04-13T17:48:37.638 INFO:teuthology.run_tasks:Running task cephadm...
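[annotation] The discovered device list is persisted to /scratch_devs on each host: teuthology pipes the list into "sudo dd of=/scratch_devs" on stdin, and later consumers (like the earlier vm01 step) read it back with "dd if=/scratch_devs of=/dev/stdout". That is how subsequent tasks learn which devices are expendable. A sketch of the round trip; the printf stands in for teuthology feeding stdin:

    printf '%s\n' /dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1 | sudo dd of=/scratch_devs
    dd if=/scratch_devs of=/dev/stdout   # read the list back verbatim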
2026-04-13T17:48:37.695 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360', 'cephadm_binary_url': 'https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4'}}
2026-04-13T17:48:37.695 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is
2026-04-13T17:48:37.695 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4
2026-04-13T17:48:37.695 INFO:tasks.cephadm:Cluster fsid is 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:48:37.696 INFO:tasks.cephadm:Choosing monitor IPs and ports...
2026-04-13T17:48:37.696 INFO:tasks.cephadm:No mon roles; fabricating mons
2026-04-13T17:48:37.696 INFO:tasks.cephadm:Monitor IPs: {'mon.vm00': '192.168.123.100', 'mon.vm01': '192.168.123.101'}
2026-04-13T17:48:37.696 INFO:tasks.cephadm:Normalizing hostnames...
2026-04-13T17:48:37.696 DEBUG:teuthology.orchestra.run.vm00:> sudo hostname $(hostname -s)
2026-04-13T17:48:37.724 DEBUG:teuthology.orchestra.run.vm01:> sudo hostname $(hostname -s)
2026-04-13T17:48:37.753 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
2026-04-13T17:48:37.753 DEBUG:teuthology.orchestra.run.vm00:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-04-13T17:48:38.992 INFO:teuthology.orchestra.run.vm00.stdout:-rw-r--r--. 1 ubuntu ubuntu 1036391 Apr 13 17:48 /home/ubuntu/cephtest/cephadm
2026-04-13T17:48:38.992 DEBUG:teuthology.orchestra.run.vm01:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm
2026-04-13T17:48:40.189 INFO:teuthology.orchestra.run.vm01.stdout:-rw-r--r--. 1 ubuntu ubuntu 1036391 Apr 13 17:48 /home/ubuntu/cephtest/cephadm
2026-04-13T17:48:40.189 DEBUG:teuthology.orchestra.run.vm00:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-04-13T17:48:40.207 DEBUG:teuthology.orchestra.run.vm01:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm
2026-04-13T17:48:40.229 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 on all hosts...
2026-04-13T17:48:40.229 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 pull
2026-04-13T17:48:40.250 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 pull
2026-04-13T17:48:40.426 INFO:teuthology.orchestra.run.vm00.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4...
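[annotation] Note the guard that follows the curl: the downloaded file must be non-empty and larger than 1000 bytes before it is marked executable, which catches an HTTP error page saved in place of the real cephadm script. The same pattern as a standalone sketch, with the URL taken from the job config:

    url=https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    curl --silent -L "$url" > cephadm
    # refuse to chmod +x unless the file is plausibly the real script
    test -s cephadm && test "$(stat -c%s cephadm)" -gt 1000 && chmod +x cephadm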
2026-04-13T17:48:40.444 INFO:teuthology.orchestra.run.vm01.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4...
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout:{
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout: "ceph_version": "ceph version 20.2.0-18-g0d1a6d86d0e (0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360) tentacle (stable)",
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout: "image_id": "06443d8796ac19529b3ba318ec42582d9c2b2d4723de9d23b91a737b3de50367",
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout: "repo_digests": [
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132"
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout: ]
2026-04-13T17:48:58.849 INFO:teuthology.orchestra.run.vm01.stdout:}
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout:{
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout: "ceph_version": "ceph version 20.2.0-18-g0d1a6d86d0e (0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360) tentacle (stable)",
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout: "image_id": "06443d8796ac19529b3ba318ec42582d9c2b2d4723de9d23b91a737b3de50367",
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout: "repo_digests": [
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132"
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout: ]
2026-04-13T17:49:01.174 INFO:teuthology.orchestra.run.vm00.stdout:}
2026-04-13T17:49:01.188 DEBUG:teuthology.orchestra.run.vm00:> sudo mkdir -p /etc/ceph
2026-04-13T17:49:01.238 DEBUG:teuthology.orchestra.run.vm01:> sudo mkdir -p /etc/ceph
2026-04-13T17:49:01.265 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 777 /etc/ceph
2026-04-13T17:49:01.290 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 777 /etc/ceph
2026-04-13T17:49:01.332 INFO:tasks.cephadm:Writing seed config...
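[annotation] Pulling on every host up front pins the exact image before bootstrap; the JSON reports show both hosts resolved the same image_id and repo digest. To assert that by hand, podman can report the digest directly; a sketch, assuming podman as on these CentOS 9 Stream nodes:

    img=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4
    sudo podman pull "$img"
    # print local image ID and the pinned repo digest for comparison across hosts
    sudo podman image inspect --format '{{.Id}} {{index .RepoDigests 0}}' "$img"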
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-04-13T17:49:01.332 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-04-13T17:49:01.333 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:49:01.333 DEBUG:teuthology.orchestra.run.vm00:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-04-13T17:49:01.349 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true

fsid = 00063a34-3761-11f1-944c-abe11cccf0ff

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
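[annotation] The per-section overrides from the job YAML are merged into teuthology's stock template to produce the seed conf that bootstrap consumes via --config. To read a merged value back out of the file, ceph-conf can query it; a sketch, hedged in that the exact lookup flags depend on your Ceph version (see the ceph-conf man page):

    ceph-conf -c /home/ubuntu/cephtest/seed.ceph.conf -s osd --lookup 'debug osd'   # expect 20
    ceph-conf -c /home/ubuntu/cephtest/seed.ceph.conf -s global --lookup fsid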
2026-04-13T17:49:01.349 DEBUG:teuthology.orchestra.run.vm00:mon.vm00> sudo journalctl -f -n 0 -u ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00.service
2026-04-13T17:49:01.390 INFO:tasks.cephadm:Bootstrapping...
2026-04-13T17:49:01.391 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 -v bootstrap --fsid 00063a34-3761-11f1-944c-abe11cccf0ff --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.100 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:49:01.549 INFO:teuthology.orchestra.run.vm00.stdout:--------------------------------------------------------------------------------
2026-04-13T17:49:01.549 INFO:teuthology.orchestra.run.vm00.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4', '-v', 'bootstrap', '--fsid', '00063a34-3761-11f1-944c-abe11cccf0ff', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.100', '--skip-admin-label']
2026-04-13T17:49:01.549 INFO:teuthology.orchestra.run.vm00.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-04-13T17:49:01.549 INFO:teuthology.orchestra.run.vm00.stdout:Verifying podman|docker is present...
2026-04-13T17:49:01.574 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stdout 5.8.0
2026-04-13T17:49:01.574 INFO:teuthology.orchestra.run.vm00.stdout:Verifying lvm2 is present...
2026-04-13T17:49:01.574 INFO:teuthology.orchestra.run.vm00.stdout:Verifying time synchronization is in place...
2026-04-13T17:49:01.582 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-04-13T17:49:01.582 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-04-13T17:49:01.588 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-04-13T17:49:01.589 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout inactive
2026-04-13T17:49:01.595 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout enabled
2026-04-13T17:49:01.601 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout active
2026-04-13T17:49:01.601 INFO:teuthology.orchestra.run.vm00.stdout:Unit chronyd.service is enabled and running
2026-04-13T17:49:01.601 INFO:teuthology.orchestra.run.vm00.stdout:Repeating the final host check...
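[annotation] Bootstrap's time-sync probe tries known unit names in turn: chrony.service fails to resolve (hence the two non-zero systemctl exits above), then chronyd.service reports enabled and active, which satisfies the check. By hand that is roughly the following; the candidate list beyond the two units shown in the log is illustrative:

    for unit in chrony.service chronyd.service; do
      systemctl is-enabled "$unit" && systemctl is-active "$unit" && break  # first healthy unit wins
    done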
2026-04-13T17:49:01.621 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stdout 5.8.0
2026-04-13T17:49:01.621 INFO:teuthology.orchestra.run.vm00.stdout:podman (/bin/podman) version 5.8.0 is present
2026-04-13T17:49:01.621 INFO:teuthology.orchestra.run.vm00.stdout:systemctl is present
2026-04-13T17:49:01.621 INFO:teuthology.orchestra.run.vm00.stdout:lvcreate is present
2026-04-13T17:49:01.627 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-04-13T17:49:01.627 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-04-13T17:49:01.633 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-04-13T17:49:01.633 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout inactive
2026-04-13T17:49:01.640 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout enabled
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout active
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:Unit chronyd.service is enabled and running
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:Host looks OK
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:Cluster fsid: 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:Acquiring lock 140173242352016 on /run/cephadm/00063a34-3761-11f1-944c-abe11cccf0ff.lock
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:Lock 140173242352016 acquired on /run/cephadm/00063a34-3761-11f1-944c-abe11cccf0ff.lock
2026-04-13T17:49:01.646 INFO:teuthology.orchestra.run.vm00.stdout:Verifying IP 192.168.123.100 port 3300 ...
2026-04-13T17:49:01.647 INFO:teuthology.orchestra.run.vm00.stdout:Verifying IP 192.168.123.100 port 6789 ...
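[annotation] Verifying ports 3300 (msgr v2) and 6789 (msgr v1) on the mon IP before laying down the monitor catches an address already claimed by a stale daemon. An equivalent manual probe, assuming the iproute2 ss tool is available:

    for port in 3300 6789; do
      ss -ltn "sport = :$port" | grep -q LISTEN && echo "port $port already taken"
    done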
2026-04-13T17:49:01.647 INFO:teuthology.orchestra.run.vm00.stdout:Base mon IP(s) is [192.168.123.100:3300, 192.168.123.100:6789], mon addrv is [v2:192.168.123.100:3300,v1:192.168.123.100:6789]
2026-04-13T17:49:01.650 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.100 metric 100
2026-04-13T17:49:01.650 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.100 metric 100
2026-04-13T17:49:01.653 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium
2026-04-13T17:49:01.653 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout fe80::/64 dev eth0 proto kernel metric 1024 pref medium
2026-04-13T17:49:01.656 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
2026-04-13T17:49:01.656 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout inet6 ::1/128 scope host
2026-04-13T17:49:01.656 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-04-13T17:49:01.656 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
2026-04-13T17:49:01.656 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout inet6 fe80::5055:ff:fe00:0/64 scope link noprefixroute
2026-04-13T17:49:01.656 INFO:teuthology.orchestra.run.vm00.stdout:/sbin/ip: stdout valid_lft forever preferred_lft forever
2026-04-13T17:49:01.657 INFO:teuthology.orchestra.run.vm00.stdout:Mon IP `192.168.123.100` is in CIDR network `192.168.123.0/24`
2026-04-13T17:49:01.657 INFO:teuthology.orchestra.run.vm00.stdout:Mon IP `192.168.123.100` is in CIDR network `192.168.123.0/24`
2026-04-13T17:49:01.657 INFO:teuthology.orchestra.run.vm00.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24']
2026-04-13T17:49:01.657 INFO:teuthology.orchestra.run.vm00.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network
2026-04-13T17:49:01.657 INFO:teuthology.orchestra.run.vm00.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4...
2026-04-13T17:49:04.357 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stdout 06443d8796ac19529b3ba318ec42582d9c2b2d4723de9d23b91a737b3de50367
2026-04-13T17:49:04.357 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stderr Trying to pull harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4...
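[annotation] With no --cluster-network given, cephadm infers the public network by listing the host's routes and checking that the mon IP falls inside a local CIDR (192.168.123.0/24 here, matched once per address-family pass, hence the duplicate message). The route listing it parses is plain iproute2 output, which you can reproduce directly:

    ip route ls      # IPv4: 192.168.123.0/24 dev eth0 ... contains the mon IP
    ip -6 route ls   # IPv6 pass: only ::1 and link-local here, so IPv4 wins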
2026-04-13T17:49:04.357 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stderr Getting image source signatures
2026-04-13T17:49:04.357 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stderr Copying blob sha256:ba1b58d57e280003fcc6a51e23c76e8dd27ea3c591172798516fe6a7d5a14952
2026-04-13T17:49:04.357 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stderr Copying config sha256:06443d8796ac19529b3ba318ec42582d9c2b2d4723de9d23b91a737b3de50367
2026-04-13T17:49:04.357 INFO:teuthology.orchestra.run.vm00.stdout:/bin/podman: stderr Writing manifest to image destination
2026-04-13T17:49:04.602 INFO:teuthology.orchestra.run.vm00.stdout:ceph: stdout ceph version 20.2.0-18-g0d1a6d86d0e (0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360) tentacle (stable)
2026-04-13T17:49:04.602 INFO:teuthology.orchestra.run.vm00.stdout:Ceph version: ceph version 20.2.0-18-g0d1a6d86d0e (0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360) tentacle (stable)
2026-04-13T17:49:04.602 INFO:teuthology.orchestra.run.vm00.stdout:Extracting ceph user uid/gid from container image...
2026-04-13T17:49:04.724 INFO:teuthology.orchestra.run.vm00.stdout:stat: stdout 167 167
2026-04-13T17:49:04.724 INFO:teuthology.orchestra.run.vm00.stdout:Creating initial keys...
2026-04-13T17:49:04.846 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph-authtool: stdout AQCQLN1pTXfkMBAAusBHt1wXZpiRf7mjViGJjg==
2026-04-13T17:49:04.945 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph-authtool: stdout AQCQLN1pc10fNxAAqy3zd1eEV+b/pG/TLMsRpg==
2026-04-13T17:49:05.073 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph-authtool: stdout AQCRLN1pOtpKAxAAFNaoVE/JS1tXqReBM/IlAA==
2026-04-13T17:49:05.074 INFO:teuthology.orchestra.run.vm00.stdout:Creating initial monmap...
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = tentacle
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:monmaptool for vm00 [v2:192.168.123.100:3300,v1:192.168.123.100:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:setting min_mon_release = tentacle
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/monmaptool: set fsid to 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:49:05.216 INFO:teuthology.orchestra.run.vm00.stdout:Creating mon...
2026-04-13T17:49:05.662 INFO:teuthology.orchestra.run.vm00.stdout:create mon.vm00 on
2026-04-13T17:49:05.944 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
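[annotation] The initial monmap is generated inside the container with monmaptool: a fresh epoch-0 map carrying the cluster fsid, the tentacle min_mon_release, and the single bootstrap mon with its v2/v1 addrv. Roughly the following invocation (flags per the monmaptool man page; the exact command cephadm runs may differ):

    monmaptool --create --clobber \
      --fsid 00063a34-3761-11f1-944c-abe11cccf0ff \
      --addv vm00 '[v2:192.168.123.100:3300,v1:192.168.123.100:6789]' \
      /tmp/monmap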
2026-04-13T17:49:06.076 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-00063a34-3761-11f1-944c-abe11cccf0ff.target → /etc/systemd/system/ceph-00063a34-3761-11f1-944c-abe11cccf0ff.target.
2026-04-13T17:49:06.076 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-00063a34-3761-11f1-944c-abe11cccf0ff.target → /etc/systemd/system/ceph-00063a34-3761-11f1-944c-abe11cccf0ff.target.
2026-04-13T17:49:06.230 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00
2026-04-13T17:49:06.231 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Failed to reset failed state of unit ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00.service: Unit ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00.service not loaded.
2026-04-13T17:49:06.398 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-00063a34-3761-11f1-944c-abe11cccf0ff.target.wants/ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00.service → /etc/systemd/system/ceph-00063a34-3761-11f1-944c-abe11cccf0ff@.service.
2026-04-13T17:49:06.587 INFO:teuthology.orchestra.run.vm00.stdout:firewalld does not appear to be present
2026-04-13T17:49:06.587 INFO:teuthology.orchestra.run.vm00.stdout:Not possible to enable service . firewalld.service is not available
2026-04-13T17:49:06.587 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for mon to start...
2026-04-13T17:49:06.587 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for mon...
2026-04-13T17:49:06.641 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:06 vm00 ceph-mon[50867]: mkfs 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:06.645 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:06 vm00 ceph-mon[50867]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0)
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout cluster:
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout id: 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout services:
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm00 (age 0.266512s) [leader: vm00]
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout data:
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout pgs:
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
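"Waiting for mon..." above is a simple poll loop: keep issuing a status command until the freshly created monitor answers. A minimal sketch of such a loop (wait_for_mon is our own helper, not cephadm's implementation):

    import subprocess
    import time

    def wait_for_mon(timeout=60):
        # Retry `ceph -s` until the mon responds or the deadline passes.
        deadline = time.time() + timeout
        while time.time() < deadline:
            r = subprocess.run(["ceph", "-s"], capture_output=True)
            if r.returncode == 0:
                return
            time.sleep(1)
        raise TimeoutError("mon did not become available")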
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:mon is available
2026-04-13T17:49:06.930 INFO:teuthology.orchestra.run.vm00.stdout:Assimilating anything we can from ceph.conf...
2026-04-13T17:49:07.237 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout [global]
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout fsid = 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.100:3300,v1:192.168.123.100:6789]
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout [mgr]
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout [osd]
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-04-13T17:49:07.238 INFO:teuthology.orchestra.run.vm00.stdout:Generating new minimal ceph.conf...
2026-04-13T17:49:07.540 INFO:teuthology.orchestra.run.vm00.stdout:Restarting the monitor...
2026-04-13T17:49:07.686 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 systemd[1]: Stopping Ceph mon.vm00 for 00063a34-3761-11f1-944c-abe11cccf0ff...
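The assimilate/minimal-conf pair above corresponds to the mon commands `ceph config assimilate-conf` and `ceph config generate-minimal-conf`; driving them by hand looks roughly like this sketch:

    import subprocess

    # Import whatever the bootstrap ceph.conf holds into the mon config
    # database (the [global]/[mgr]/[osd] dump above is what was absorbed).
    subprocess.run(["ceph", "config", "assimilate-conf",
                    "-i", "/etc/ceph/ceph.conf"], check=True)
    # Then emit the pared-down conf that only needs fsid and mon_host.
    minimal = subprocess.run(["ceph", "config", "generate-minimal-conf"],
                             capture_output=True, text=True, check=True).stdout
    print(minimal)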
2026-04-13T17:49:07.686 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00[50863]: 2026-04-13T17:49:07.619+0000 7fa3cefd8640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm00 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-04-13T17:49:07.686 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00[50863]: 2026-04-13T17:49:07.619+0000 7fa3cefd8640 -1 mon.vm00@0(leader) e1 *** Got Signal Terminated *** 2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 podman[51077]: 2026-04-13 17:49:07.732872956 +0000 UTC m=+0.129265721 container died f22867ed5576cac20810356ce3de70ed44faaacad26ecdbd862cc2fb2586cce2 (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 podman[51077]: 2026-04-13 17:49:07.746251021 +0000 UTC m=+0.142643786 container remove f22867ed5576cac20810356ce3de70ed44faaacad26ecdbd862cc2fb2586cce2 (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True) 2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 bash[51077]: ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00 2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 systemd[1]: ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00.service: Deactivated successfully. 2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 systemd[1]: Stopped Ceph mon.vm00 for 00063a34-3761-11f1-944c-abe11cccf0ff. 2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 systemd[1]: Starting Ceph mon.vm00 for 00063a34-3761-11f1-944c-abe11cccf0ff... 
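The stop/died/remove and create/start churn in the journal above is systemd restarting the per-daemon unit, with podman tearing down and recreating the container as the unit's payload. The equivalent manual step, sketched in Python (the unit name follows the ceph-<fsid>@<daemon> pattern visible in the log):

    import subprocess

    FSID = "00063a34-3761-11f1-944c-abe11cccf0ff"  # from the log
    unit = f"ceph-{FSID}@mon.vm00.service"
    # "Restarting the monitor..." == restart the unit; podman removes the
    # old container and starts a fresh one, as the journal records show.
    subprocess.run(["systemctl", "restart", unit], check=True)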
2026-04-13T17:49:07.947 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 podman[51158]: 2026-04-13 17:49:07.916564535 +0000 UTC m=+0.019004814 container create 7acf6113c4c4285037ffbcce518cc86abcbbd8e9918e8cbd94cd7f4a1e2611c1 (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-04-13T17:49:07.966 INFO:teuthology.orchestra.run.vm00.stdout:Setting public_network to 192.168.123.0/24 in global config section 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 podman[51158]: 2026-04-13 17:49:07.951898028 +0000 UTC m=+0.054338307 container init 7acf6113c4c4285037ffbcce518cc86abcbbd8e9918e8cbd94cd7f4a1e2611c1 (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9) 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 podman[51158]: 2026-04-13 17:49:07.954547189 +0000 UTC m=+0.056987468 container start 7acf6113c4c4285037ffbcce518cc86abcbbd8e9918e8cbd94cd7f4a1e2611c1 (image=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm00, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e) 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 bash[51158]: 7acf6113c4c4285037ffbcce518cc86abcbbd8e9918e8cbd94cd7f4a1e2611c1 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 podman[51158]: 2026-04-13 17:49:07.909825514 +0000 UTC m=+0.012265793 image pull 06443d8796ac19529b3ba318ec42582d9c2b2d4723de9d23b91a737b3de50367 harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 systemd[1]: Started Ceph mon.vm00 for 00063a34-3761-11f1-944c-abe11cccf0ff. 
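"Setting public_network to 192.168.123.0/24 in global config section" is a plain `ceph config set` against the mon config database; for reference:

    import subprocess

    # Persist the CIDR inferred earlier so later daemons bind to it.
    subprocess.run(["ceph", "config", "set", "global",
                    "public_network", "192.168.123.0/24"], check=True)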
2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: set uid:gid to 167:167 (ceph:ceph) 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: ceph version 20.2.0-18-g0d1a6d86d0e (0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 2 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: pidfile_write: ignore empty --pid-file 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: load: jerasure load: lrc 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: RocksDB version: 7.9.2 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Git sha 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Compile date 2026-04-13 11:21:09 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: DB SUMMARY 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: DB Session ID: 9QH4NMUMDZVDP59DMNI8 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: CURRENT file: CURRENT 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: IDENTITY file: IDENTITY 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm00/store.db dir, Total Num: 1, files: 000008.sst 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm00/store.db: 000009.log size: 76277 ; 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.error_if_exists: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.create_if_missing: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.paranoid_checks: 1 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.flush_verify_memtable_count: 1 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.env: 0x5567d8db8440 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.fs: PosixFileSystem 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.info_log: 0x5567d96e9300 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 
17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_file_opening_threads: 16 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.statistics: (nil) 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.use_fsync: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_log_file_size: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.log_file_time_to_roll: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.keep_log_file_num: 1000 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.recycle_log_file_num: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.allow_fallocate: 1 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.allow_mmap_reads: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.allow_mmap_writes: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.use_direct_reads: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.create_missing_column_families: 0 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.db_log_dir: 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.wal_dir: 2026-04-13T17:49:08.195 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.table_cache_numshardbits: 6 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.WAL_ttl_seconds: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.WAL_size_limit_MB: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.is_fd_close_on_exec: 1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.advise_random_on_open: 1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.db_write_buffer_size: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: 
Options.write_buffer_manager: 0x5567d96ec500 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.use_adaptive_mutex: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.rate_limiter: (nil) 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.wal_recovery_mode: 2 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.enable_thread_tracking: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.enable_pipelined_write: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.unordered_write: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.row_cache: None 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.wal_filter: None 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.allow_ingest_behind: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.two_write_queues: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.manual_wal_flush: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.wal_compression: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.atomic_flush: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.persist_stats_to_disk: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: 
Options.write_dbid_to_manifest: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.log_readahead_size: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.best_efforts_recovery: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.allow_data_in_errors: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.db_host_id: __hostname__ 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.enforce_single_del_contracts: true 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_background_jobs: 2 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_background_compactions: -1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_subcompactions: 1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.delayed_write_rate : 16777216 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_total_wal_size: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.stats_dump_period_sec: 600 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.stats_persist_period_sec: 600 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_open_files: -1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bytes_per_sync: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.wal_bytes_per_sync: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.strict_bytes_per_sync: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 
17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_readahead_size: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_background_flushes: -1 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Compression algorithms supported: 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kZSTD supported: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kXpressCompression supported: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kBZip2Compression supported: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-04-13T17:49:08.196 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kLZ4Compression supported: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kZlibCompression supported: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kLZ4HCCompression supported: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: kSnappyCompression supported: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Fast CRC32 supported: Supported on x86 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: DMutex implementation: pthread_mutex_t 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm00/store.db/MANIFEST-000010 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.merge_operator: 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_filter: None 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_filter_factory: None 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.sst_partitioner_factory: None 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.memtable_factory: SkipListFactory 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.table_factory: BlockBasedTable 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5567d96e8ec0) 2026-04-13T17:49:08.197 
INFO:journalctl@ceph.mon.vm00.vm00.stdout: cache_index_and_filter_blocks: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: pin_top_level_index_and_filter: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_type: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: data_block_index_type: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_shortening: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: data_block_hash_table_util_ratio: 0.750000 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: checksum: 4 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: no_block_cache: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache: 0x5567d96df8d0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_name: BinnedLRUCache 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_options: 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: capacity : 536870912 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: num_shard_bits : 4 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: strict_capacity_limit : 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: high_pri_pool_ratio: 0.000 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_cache_compressed: (nil) 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: persistent_cache: (nil) 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_size: 4096 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_size_deviation: 10 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_restart_interval: 16 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: index_block_restart_interval: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: metadata_block_size: 4096 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: partition_filters: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: use_delta_encoding: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: filter_policy: bloomfilter 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: whole_key_filtering: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: verify_compression: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: read_amp_bytes_per_bit: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: format_version: 5 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: enable_index_compression: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: block_align: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: max_auto_readahead_size: 262144 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: prepopulate_block_cache: 0 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: initial_auto_readahead_size: 8192 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout: num_file_reads_for_auto_readahead: 2 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: 
Options.write_buffer_size: 33554432 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_write_buffer_number: 2 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression: NoCompression 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression: Disabled 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.prefix_extractor: nullptr 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.num_levels: 7 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-04-13T17:49:08.197 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.window_bits: -14 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.level: 32767 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.strategy: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.max_dict_bytes: 0 
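The RocksDB startup dump running through this part of the journal is a long series of "Options.<name>: <value>" records; when diffing the options between two boots it can help to scrape them into a dict. A small helper sketch (our own, hedged: the regex covers the record shapes seen here, not every format RocksDB can emit):

    import re

    OPT_RE = re.compile(r"Options\.([A-Za-z0-9_.\[\]]+)\s*:?\s+(\S.*?)\s*$")

    def scrape_rocksdb_options(lines):
        # Map option name -> printed value for every "Options..." record.
        opts = {}
        for line in lines:
            m = OPT_RE.search(line)
            if m:
                opts[m.group(1)] = m.group(2)
        return opts

    # e.g. scrape_rocksdb_options(open("mon.journal"))["max_open_files"] == "-1"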
2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.enabled: false 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.target_file_size_base: 67108864 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.target_file_size_multiplier: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-04-13T17:49:08.198 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.arena_block_size: 1048576 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.disable_auto_compactions: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.inplace_update_support: 0 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.inplace_update_num_locks: 10000 2026-04-13T17:49:08.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.memtable_huge_page_size: 0 2026-04-13T17:49:08.199 
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.bloom_locality: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.max_successive_merges: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.optimize_filters_for_hits: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.paranoid_file_checks: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.force_consistency_checks: 1 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.report_bg_io_stats: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.ttl: 2592000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.periodic_compaction_seconds: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.enable_blob_files: false 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.min_blob_size: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.blob_file_size: 268435456 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.blob_compression_type: NoCompression 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.enable_blob_garbage_collection: false 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.blob_file_starting_level: 0 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm00/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 
0), log number is 5 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 290f5c79-e995-44c2-a5f6-fc92b4ccd42a 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776102547976894, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776102547978413, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 73372, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 227, "table_properties": {"data_size": 71658, "index_size": 167, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9666, "raw_average_key_size": 48, "raw_value_size": 66165, "raw_average_value_size": 334, "num_data_blocks": 8, "num_entries": 198, "num_filter_entries": 198, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776102547, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "290f5c79-e995-44c2-a5f6-fc92b4ccd42a", "db_session_id": "9QH4NMUMDZVDP59DMNI8", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776102547978468, "job": 1, "event": "recovery_finished"} 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm00/store.db/000009.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5567d970ae00 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: DB pointer 0x5567d9856000 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: 
** DB Stats **
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] **
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: L0 2/0 73.53 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.0 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Sum 2/0 73.53 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.0 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 53.0 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** Compaction Stats [default] **
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 53.0 0.00 0.00 1 0.001 0 0 0.0 0.0
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-04-13T17:49:08.199 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Uptime(secs): 0.0 total, 0.0 interval
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Flush(GB): cumulative 0.000, interval 0.000
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(GB):
cumulative 0.000, interval 0.000
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Total Files): cumulative 0, interval 0
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(L0 Files): cumulative 0, interval 0
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: AddFile(Keys): cumulative 0, interval 0
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Cumulative compaction: 0.00 GB write, 13.62 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Interval compaction: 0.00 GB write, 13.62 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Block cache BinnedLRUCache@0x5567d96df8d0#2 capacity: 512.00 MB usage: 1.06 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1e-05 secs_since: 0
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(1,0.00 KB,0%)
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: ** File Read Latency Histogram By Level [default] **
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: starting mon.vm00 rank 0 at public addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] at bind addrs [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon_data /var/lib/ceph/mon/ceph-vm00 fsid 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???) e1 preinit fsid 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).mds e1 new map
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).mds e1 print_map
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: e1
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: btime 2026-04-13T17:49:06:632051+0000
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: enable_multiple, ever_enabled_multiple: 1,1
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: legacy client fscid: -1
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout: No filesystems configured
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:07 vm00 ceph-mon[51174]: mon.vm00@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: mon.vm00 is new leader, mons vm00 in quorum (ranks 0)
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: monmap epoch 1
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: fsid 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: last_changed 2026-04-13T17:49:05.198343+0000
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: created 2026-04-13T17:49:05.198343+0000
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: min_mon_release 20 (tentacle)
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: election_strategy: 1
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.vm00
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: fsmap
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: osdmap e1: 0 total, 0 up, 0 in
2026-04-13T17:49:08.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:08 vm00 ceph-mon[51174]: mgrmap e1: no daemons active
2026-04-13T17:49:08.372 INFO:teuthology.orchestra.run.vm00.stdout:Wrote config to /etc/ceph/ceph.conf
2026-04-13T17:49:08.372 INFO:teuthology.orchestra.run.vm00.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:49:08.372 INFO:teuthology.orchestra.run.vm00.stdout:Creating mgr...
2026-04-13T17:49:08.373 INFO:teuthology.orchestra.run.vm00.stdout:Verifying port 0.0.0.0:9283 ...
2026-04-13T17:49:08.373 INFO:teuthology.orchestra.run.vm00.stdout:Verifying port 0.0.0.0:8765 ...
2026-04-13T17:49:08.373 INFO:teuthology.orchestra.run.vm00.stdout:Verifying port 0.0.0.0:8443 ...
2026-04-13T17:49:08.532 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mgr.vm00.vrvkmc
2026-04-13T17:49:08.532 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Failed to reset failed state of unit ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mgr.vm00.vrvkmc.service: Unit ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mgr.vm00.vrvkmc.service not loaded.
2026-04-13T17:49:08.681 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-00063a34-3761-11f1-944c-abe11cccf0ff.target.wants/ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mgr.vm00.vrvkmc.service → /etc/systemd/system/ceph-00063a34-3761-11f1-944c-abe11cccf0ff@.service.
2026-04-13T17:49:08.851 INFO:teuthology.orchestra.run.vm00.stdout:firewalld does not appear to be present
2026-04-13T17:49:08.851 INFO:teuthology.orchestra.run.vm00.stdout:Not possible to enable service . firewalld.service is not available
2026-04-13T17:49:08.852 INFO:teuthology.orchestra.run.vm00.stdout:firewalld does not appear to be present
2026-04-13T17:49:08.852 INFO:teuthology.orchestra.run.vm00.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-04-13T17:49:08.852 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for mgr to start...
2026-04-13T17:49:08.852 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for mgr...
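The three "Verifying port" lines above are cephadm's pre-flight check that the mgr's ports (9283, 8765, 8443; their roles as prometheus-metrics, cephadm HTTP endpoint, and dashboard ports are inferred, not stated in the log) are still free before the daemon is created. A rough shell stand-in for that check follows; cephadm itself attempts to bind the sockets, so the ss-based test is a simplification and an assumption:

    for p in 9283 8765 8443; do
        # an existing listener shows up in "ss -tln" output as ":<port> "
        if ss -tln | grep -q ":$p "; then
            echo "port $p is already in use" >&2
            exit 1
        fi
    done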
2026-04-13T17:49:09.161 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "fsid": "00063a34-3761-11f1-944c-abe11cccf0ff",
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "health": {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "status": "HEALTH_OK",
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "checks": {},
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "mutes": []
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "election_epoch": 5,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "quorum": [
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         0
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     ],
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "quorum_names": [
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "vm00"
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     ],
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "quorum_age": 1,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "monmap": {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "epoch": 1,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "min_mon_release_name": "tentacle",
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_mons": 1
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "osdmap": {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "epoch": 1,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_osds": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_up_osds": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "osd_up_since": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_in_osds": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "osd_in_since": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_remapped_pgs": 0
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "pgmap": {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "pgs_by_state": [],
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_pgs": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_pools": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_objects": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "data_bytes": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "bytes_used": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "bytes_avail": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "bytes_total": 0
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "fsmap": {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "epoch": 1,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "btime": "2026-04-13T17:49:06:632051+0000",
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "by_rank": [],
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "up:standby": 0
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "mgrmap": {
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "available": false,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "num_standbys": 0,
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "modules": [
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout             "iostat",
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout             "nfs"
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         ],
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "services": {}
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.162 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "servicemap": {
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "epoch": 1,
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "modified": "2026-04-13T17:49:06.632774+0000",
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout         "services": {}
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     },
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "progress_events": {}
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }
2026-04-13T17:49:09.163 INFO:teuthology.orchestra.run.vm00.stdout:mgr not available, waiting (1/15)...
2026-04-13T17:49:09.446 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:09 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/673996382' entity='client.admin'
2026-04-13T17:49:09.446 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:09 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3900348764' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
[17:49:11.653 through 17:49:18.836: four further "ceph status" polls omitted; their JSON is a verbatim repeat of the first poll except "quorum_age" (3, 5, 8, 10), each attempt ends "mgr not available, waiting (2/15)..." through "(5/15)...", and each is followed by the matching mon audit entry for the next status dispatch]
2026-04-13T17:49:18.836 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:18 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1515427175' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: Activating manager daemon vm00.vrvkmc
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: mgrmap e2: vm00.vrvkmc(active, starting, since 0.00530476s)
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mds metadata"} : dispatch
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata"} : dispatch
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata"} : dispatch
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm00.vrvkmc", "id": "vm00.vrvkmc"} : dispatch
2026-04-13T17:49:19.842 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: Manager daemon vm00.vrvkmc is now available
2026-04-13T17:49:19.843 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/mirror_snapshot_schedule"} : dispatch
2026-04-13T17:49:19.843 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:19.843 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:19.843 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:19.843 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:19 vm00 ceph-mon[51174]: from='mgr.14100 192.168.123.100:0/2201877134' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/trash_purge_schedule"} : dispatch
INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout 0 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout ], 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "vm00" 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout ], 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "quorum_age": 13, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-13T17:49:21.096 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "epoch": 1, 
2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "btime": "2026-04-13T17:49:06:632051+0000", 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "available": true, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout ], 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-13T17:49:21.097 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "modified": "2026-04-13T17:49:06.632774+0000", 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }, 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout } 2026-04-13T17:49:21.098 INFO:teuthology.orchestra.run.vm00.stdout:mgr is available 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout [global] 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout fsid = 00063a34-3761-11f1-944c-abe11cccf0ff 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.100:3300,v1:192.168.123.100:6789] 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout [mgr] 2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-04-13T17:49:21.501 
2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout
2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout [osd]
2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-04-13T17:49:21.501 INFO:teuthology.orchestra.run.vm00.stdout:Enabling cephadm module...
2026-04-13T17:49:21.607 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:21 vm00 ceph-mon[51174]: mgrmap e3: vm00.vrvkmc(active, since 1.0103s)
2026-04-13T17:49:21.608 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:21 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/764655285' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-13T17:49:22.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:22 vm00 ceph-mon[51174]: mgrmap e4: vm00.vrvkmc(active, since 2s)
2026-04-13T17:49:22.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:22 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3674435599' entity='client.admin' cmd={"prefix": "config assimilate-conf"} : dispatch
2026-04-13T17:49:22.621 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:22 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2498841959' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "cephadm"} : dispatch
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "epoch": 5,
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "available": true,
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "active_name": "vm00.vrvkmc",
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "num_standby": 0
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for the mgr to restart...
2026-04-13T17:49:22.969 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for mgr epoch 5...
2026-04-13T17:49:23.626 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:23 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2498841959' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished
2026-04-13T17:49:23.626 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:23 vm00 ceph-mon[51174]: mgrmap e5: vm00.vrvkmc(active, since 3s)
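Per the audit entries above, the bootstrap now folds the freshly written ceph.conf into the cluster's central config store and turns on the cephadm mgr module, which forces the active mgr to restart; it then watches the mgr map for the post-restart epoch. The equivalent CLI sequence, with the -i path taken from the earlier "Wrote config" line (polling cadence is an assumption):

    ceph config assimilate-conf -i /etc/ceph/ceph.conf
    ceph mgr module enable cephadm
    ceph mgr stat    # repeat until the reported epoch reaches at least the target (5 here)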
2026-04-13T17:49:23.626 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:23 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1797025432' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: Active manager daemon vm00.vrvkmc restarted
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: Activating manager daemon vm00.vrvkmc
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: osdmap e2: 0 total, 0 up, 0 in
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: mgrmap e6: vm00.vrvkmc(active, starting, since 0.00484654s)
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm00.vrvkmc", "id": "vm00.vrvkmc"} : dispatch
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mds metadata"} : dispatch
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata"} : dispatch
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata"} : dispatch
2026-04-13T17:49:33.332 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:33 vm00 ceph-mon[51174]: Manager daemon vm00.vrvkmc is now available
2026-04-13T17:49:34.099 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {
2026-04-13T17:49:34.100 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "mgrmap_epoch": 7,
2026-04-13T17:49:34.100 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout     "initialized": true
2026-04-13T17:49:34.100 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }
2026-04-13T17:49:34.100 INFO:teuthology.orchestra.run.vm00.stdout:mgr epoch 5 is available
2026-04-13T17:49:34.100 INFO:teuthology.orchestra.run.vm00.stdout:Verifying orchestrator module is enabled...
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: Found migration_current of "None". Setting to last migration.
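With the mgr back, the bootstrap confirms the orchestrator interface is usable before pointing it at cephadm. A minimal equivalent of that verification; "ceph orch status" is a standard command but an addition here, the log itself only shows the module-enable attempt just below:

    ceph mgr module enable orchestrator   # a no-op: reported below as "already enabled (always-on)"
    ceph orch status                      # succeeds once a backend is set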
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/mirror_snapshot_schedule"} : dispatch
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/trash_purge_schedule"} : dispatch
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:49:34.599 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:34 vm00 ceph-mon[51174]: mgrmap e7: vm00.vrvkmc(active, since 1.00952s)
2026-04-13T17:49:35.471 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stderr module 'orchestrator' is already enabled (always-on)
2026-04-13T17:49:35.471 INFO:teuthology.orchestra.run.vm00.stdout:Setting orchestrator backend to cephadm...
2026-04-13T17:49:35.603 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:35 vm00 ceph-mon[51174]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-04-13T17:49:35.603 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:35 vm00 ceph-mon[51174]: from='client.14128 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-04-13T17:49:35.603 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:35 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:34] ENGINE Bus STARTING
2026-04-13T17:49:35.603 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:35 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/739070316' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "orchestrator"} : dispatch
2026-04-13T17:49:35.603 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:35 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:49:36.318 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout value unchanged
2026-04-13T17:49:36.318 INFO:teuthology.orchestra.run.vm00.stdout:Generating ssh key...
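The backend selection and key generation seen here correspond to the following cephadm commands, all visible in the audit trail above and below (user and backend exactly as logged):

    ceph orch set backend cephadm
    ceph cephadm set-user root
    ceph cephadm generate-key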
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:34] ENGINE Serving on http://192.168.123.100:8765
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:34] ENGINE Serving on https://192.168.123.100:7150
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:34] ENGINE Bus STARTED
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:34] ENGINE Client ('192.168.123.100', 57742) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/739070316' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "orchestrator"}]': finished
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: mgrmap e8: vm00.vrvkmc(active, since 2s)
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:36.606 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:36 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:49:37.424 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXMy0CWaUrcPo/X+ObyBtOU4EGBPMV5U0Jv3lfaUz5g/YxTqwNeDUBq6PLf5zVoPVLgMyT0F2JRuEVYeWMQpR3ngOnmTTuDPc0pMCWG+/KIg5LribLKbBB88n2Lej87CI/lfeoPgBbsPkkHqo//NYve96sibE7l4D4vdqvhbWVVj8MgmkapE1d/REwxHgb6nhwVUtuTKZ8Ko+SfGCQ2ILohITanSMGWKEgBk27tyt2f7oK1LcaUeygQYF6SfnpvFhv5kLZL0GICAWEf2rLl/BKGwIuaCmcQounTLodSARvyq6G3xhswUBLPIacsRd8HahVxCqhRJUjkUCo7t0xHXEwZJzYCqc9IfuACXTEcjhfyEskSdS3xw60XEJCDdjFfkpQD+/EXuQ7JlXw130o+kl3kOmSy5Bym/Azo5iKCai97csjojywoXyn5Oht+OTYBHkPBLEt/24Ifw4sbMGqlJ4XkRROu6cm9mPmO29OMamZADjjnWlLrOjh+AxdmIDkUW8= ceph-00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:49:37.424 INFO:teuthology.orchestra.run.vm00.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-04-13T17:49:37.424 INFO:teuthology.orchestra.run.vm00.stdout:Adding key to root@localhost authorized_keys...
2026-04-13T17:49:37.425 INFO:teuthology.orchestra.run.vm00.stdout:Adding host vm00...
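The generated public key is fetched and installed for root on the host so the cephadm mgr module can SSH in, and the host is then registered with the orchestrator. A sketch using the path and address from the log; the bootstrap appends the key to authorized_keys itself, so ssh-copy-id below is an assumed stand-in for that step:

    ceph cephadm get-pub-key > /home/ubuntu/cephtest/ceph.pub
    ssh-copy-id -f -i /home/ubuntu/cephtest/ceph.pub root@vm00
    ceph orch host add vm00 192.168.123.100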
2026-04-13T17:49:37.615 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:37 vm00 ceph-mon[51174]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:37.615 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:37 vm00 ceph-mon[51174]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:37.615 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:37 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:37.615 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:37 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:38.628 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:38 vm00 ceph-mon[51174]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:38.628 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:38 vm00 ceph-mon[51174]: Generating ssh key...
2026-04-13T17:49:38.628 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:38 vm00 ceph-mon[51174]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:39.631 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:39 vm00 ceph-mon[51174]: from='client.14146 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm00", "addr": "192.168.123.100", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:40.453 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Added host 'vm00' with addr '192.168.123.100'
2026-04-13T17:49:40.453 INFO:teuthology.orchestra.run.vm00.stdout:Deploying mon service with default placement...
2026-04-13T17:49:40.642 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:40 vm00 ceph-mon[51174]: Deploying cephadm binary to vm00
2026-04-13T17:49:40.642 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:40 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:40.642 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:40 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:49:40.917 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-04-13T17:49:40.917 INFO:teuthology.orchestra.run.vm00.stdout:Deploying mgr service with default placement...
2026-04-13T17:49:41.346 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-04-13T17:49:41.346 INFO:teuthology.orchestra.run.vm00.stdout:Deploying crash service with default placement...
2026-04-13T17:49:41.651 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:41 vm00 ceph-mon[51174]: Added host vm00
2026-04-13T17:49:41.651 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:41 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:41.651 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:41 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:41.782 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled crash update...
2026-04-13T17:49:41.782 INFO:teuthology.orchestra.run.vm00.stdout:Deploying ceph-exporter service with default placement...
2026-04-13T17:49:42.250 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update...
2026-04-13T17:49:42.250 INFO:teuthology.orchestra.run.vm00.stdout:Deploying prometheus service with default placement...
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: Saving service mon spec with placement count:5
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: Saving service mgr spec with placement count:2
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:42.661 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:42 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:42.761 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled prometheus update...
2026-04-13T17:49:42.761 INFO:teuthology.orchestra.run.vm00.stdout:Deploying grafana service with default placement...
2026-04-13T17:49:43.200 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled grafana update...
2026-04-13T17:49:43.200 INFO:teuthology.orchestra.run.vm00.stdout:Deploying node-exporter service with default placement...
2026-04-13T17:49:43.668 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update...
2026-04-13T17:49:43.668 INFO:teuthology.orchestra.run.vm00.stdout:Deploying alertmanager service with default placement...
2026-04-13T17:49:43.694 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: Saving service crash spec with placement *
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: Saving service ceph-exporter spec with placement *
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:43.695 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:43 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:44.111 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update...
2026-04-13T17:49:44.451 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:44 vm00 ceph-mon[51174]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:44.452 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:44 vm00 ceph-mon[51174]: Saving service prometheus spec with placement count:1
2026-04-13T17:49:44.452 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:44 vm00 ceph-mon[51174]: from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:44.452 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:44 vm00 ceph-mon[51174]: Saving service grafana spec with placement count:1
2026-04-13T17:49:44.452 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:44 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:44.452 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:44 vm00 ceph-mon[51174]: from='mgr.14124 192.168.123.100:0/4263865742' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:44.977 INFO:teuthology.orchestra.run.vm00.stdout:Enabling the dashboard module...
2026-04-13T17:49:45.498 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: from='client.14160 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:45.498 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: Saving service node-exporter spec with placement *
2026-04-13T17:49:45.498 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: from='client.14162 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:45.498 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: Saving service alertmanager spec with placement count:1
2026-04-13T17:49:45.498 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/870888879' entity='client.admin'
2026-04-13T17:49:45.498 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3788531285' entity='client.admin'
2026-04-13T17:49:45.499 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:45 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/941436180' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "dashboard"} : dispatch
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "epoch": 9,
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "available": true,
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "active_name": "vm00.vrvkmc",
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "num_standby": 0
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for the mgr to restart...
2026-04-13T17:49:46.466 INFO:teuthology.orchestra.run.vm00.stdout:Waiting for mgr epoch 9...
2026-04-13T17:49:47.012 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:46 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/941436180' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-04-13T17:49:47.012 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:46 vm00 ceph-mon[51174]: mgrmap e9: vm00.vrvkmc(active, since 12s)
2026-04-13T17:49:47.012 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:46 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/124483546' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch
2026-04-13T17:49:56.541 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: Active manager daemon vm00.vrvkmc restarted
2026-04-13T17:49:56.541 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: Activating manager daemon vm00.vrvkmc
2026-04-13T17:49:56.541 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: osdmap e3: 0 total, 0 up, 0 in
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: mgrmap e10: vm00.vrvkmc(active, starting, since 0.00669928s)
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm00.vrvkmc", "id": "vm00.vrvkmc"} : dispatch
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mds metadata"} : dispatch
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata"} : dispatch
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata"} : dispatch
2026-04-13T17:49:56.542 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:56 vm00 ceph-mon[51174]: Manager daemon vm00.vrvkmc is now available
2026-04-13T17:49:57.229 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {
2026-04-13T17:49:57.229 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 11,
2026-04-13T17:49:57.229 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout "initialized": true
2026-04-13T17:49:57.229 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout }
2026-04-13T17:49:57.229 INFO:teuthology.orchestra.run.vm00.stdout:mgr epoch 9 is available
2026-04-13T17:49:57.229 INFO:teuthology.orchestra.run.vm00.stdout:Using certmgr to generate dashboard self-signed certificate...
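The "Waiting for the mgr to restart... / Waiting for mgr epoch 9..." exchange above is the bootstrap polling `ceph mgr stat` until the active mgr comes back after the dashboard module was enabled: it records the mgrmap epoch at enable time, then re-reads the stat until the epoch has advanced past it and the mgr reports initialized (here mgrmap_epoch 11 > 9). A minimal Python sketch of that style of wait loop follows; it is an illustration, not the bootstrap's actual code, and the binary path, interval, and timeout are assumptions:

    import json
    import subprocess
    import time

    def wait_for_mgr_epoch(target: int, timeout: float = 60.0) -> None:
        # `ceph mgr stat` returns JSON like {"mgrmap_epoch": 11, "initialized": true},
        # as shown in the log above.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.check_output(
                ["/usr/bin/ceph", "mgr", "stat", "--format", "json"])
            stat = json.loads(out)
            if stat["mgrmap_epoch"] >= target and stat["initialized"]:
                print(f"mgr epoch {target} is available")
                return
            time.sleep(1)  # retry interval is an assumption
        raise TimeoutError(f"mgr epoch {target} not reached within {timeout}s")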
2026-04-13T17:49:57.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:57 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:49:57.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:57 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/mirror_snapshot_schedule"} : dispatch
2026-04-13T17:49:57.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:57 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/trash_purge_schedule"} : dispatch
2026-04-13T17:49:57.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:57 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:57.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:57 vm00 ceph-mon[51174]: mgrmap e11: vm00.vrvkmc(active, since 1.0108s)
2026-04-13T17:49:58.031 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {"cert": "-----BEGIN CERTIFICATE-----\nMIIE/zCCAuegAwIBAgIUPksn/ANq2IH4Gsz0LLoGDZpOZxgwDQYJKoZIhvcNAQEL\nBQAwFzEVMBMGA1UEAwwMY2VwaGFkbS1yb290MB4XDTI2MDQxMzE3NDk1N1oXDTI5\nMDQxMjE3NDk1N1owGjEYMBYGA1UEAwwPMTkyLjE2OC4xMjMuMTAwMIICIjANBgkq\nhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuTZhIInKyWrACp7OaTnxUK2WcJbsP22a\nYKi+PO5+kpxUkckoh5Vo3hHaa0zc9F2XH4JDNqvvP21c/np4q5ihA6oRXMMjTcc5\nU2nmfIx7yKAFRvztE6h+SvaH0iBY9sRAqRR7n0yIC9jQSZ3fsCPL/iqt3NLHAQx2\nQKpRKdQskx0EGyLPEbOnuNUY5ZaPTY9jLkORrtBeEhTyKxbnmzJhODL1SQGBXm6a\nQdncYTG1pXQWVPqnSO/edXsDNrP7niLRCthgVmdDyaET39LCtjzBIqA3vCb3+1l5\nWRGbsbyzMP1/3v9bCzgPKTY5mHJz9n/PwP6TGcvgLQb8yUIy1mAGwoEMzIu/WnEZ\nRxMR2Y5L4w+AmIK/g7JsS84BDpodRct9vV2iZyn432VujPWOevC5omWGED/Rnmc9\n7ea0sRIMKjjlv45aPZfn4NPPoC1B7JX5UDEag0vinsnwdNwLJMzuTBQK6DmYu8qh\nAvtcxWGlqf6tcUSrnYQp99zOZyscaf6zrCgM2rLfxkaVuLJ1EWzmdnshBkrKHfmz\nJwMQ9IzhoiN/9moMLPz/IdeVS0mDVQWAH61MOFF98m9T7NKnYmSl6yuRBlZv6sT1\nx+Fw28zuKk2zFLC0zNOH8ux8mEpv3QWHDyJXY2oCeWzVc7ytFZg0vTnUdSzyMkJG\nLifNWbeq0TcCAwEAAaNAMD4wLgYDVR0RBCcwJYIKdm0wMC5sb2NhbIIRZGFzaGJv\nYXJkX3NlcnZlcnOHBMCoe2QwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOC\nAgEAXohoPUF7CYLT0srR0PHuBm4vjF/XE7+7uhPcwu1IYhEpc1ybVzZIzeOr/rqV\n6dfHC9sDIuMpFR0eqxca1LRSBFp6nYxT1vkQPmARhz90n67bsntMF+1N5nufJrhe\nTAVjoE3sDQBGKWEVzQfvSuHur8elbxpjDs1/Y37hcdFNBS3catpHnWwV2IdjRuXA\nudgkKUo7PdWhl6M2hCvv1PNcsKrVSDkdMVYzJ/8W5lYPHa1k9vUAcqsFZs1OygxO\nRSOXY8/8n8EOTCRreN2JtKBaWeHYK72HGXdXZn82gl10y8hcoSSmknwd+fiBPmKC\nXqqrN/wvlFggRY3EXit8wVoZklWRvAyr8SmYlt0RbnnxxfNSgpEWOMB2oj/tJeyo\nlG9QyTdoaNZwshHPK4a3K817OJMh945BzypPqmVnsEGEW0bU5hfg8iaQeZgvoV1A\n+VWNinPZtr1II5du9lYEUrruQi2FIwpkjdbXLK5Lx/wvq9dMQsbEyCRbOTfCnsNT\nSMjSMIv3xB2FEaqd6cvQIKDOglAidTipaFLrwRlt9jN3iu8QgxRADoJ1i68xlcJB\n4SBuYbcZlCfeQv9a5PCBYb2AGh0opHtTEdpDaGtt/tqstiv2/oSQqJ1e0lKdsrsX\nVfsOSzMBQb+/Nv4DRGnequXoSI5pfrrWHDr+Z1eZ1QUyY54=\n-----END CERTIFICATE-----\n", "key": "-----BEGIN RSA PRIVATE KEY-----\nMIIJKQIBAAKCAgEAuTZhIInKyWrACp7OaTnxUK2WcJbsP22aYKi+PO5+kpxUkcko\nh5Vo3hHaa0zc9F2XH4JDNqvvP21c/np4q5ihA6oRXMMjTcc5U2nmfIx7yKAFRvzt\nE6h+SvaH0iBY9sRAqRR7n0yIC9jQSZ3fsCPL/iqt3NLHAQx2QKpRKdQskx0EGyLP\nEbOnuNUY5ZaPTY9jLkORrtBeEhTyKxbnmzJhODL1SQGBXm6aQdncYTG1pXQWVPqn\nSO/edXsDNrP7niLRCthgVmdDyaET39LCtjzBIqA3vCb3+1l5WRGbsbyzMP1/3v9b\nCzgPKTY5mHJz9n/PwP6TGcvgLQb8yUIy1mAGwoEMzIu/WnEZRxMR2Y5L4w+AmIK/\ng7JsS84BDpodRct9vV2iZyn432VujPWOevC5omWGED/Rnmc97ea0sRIMKjjlv45a\nPZfn4NPPoC1B7JX5UDEag0vinsnwdNwLJMzuTBQK6DmYu8qh\nAvtcxWGlqf6tcUSr\nnYQp99zOZyscaf6zrCgM2rLfxkaVuLJ1EWzmdnshBkrKHfmzJwMQ9IzhoiN/9moM\nLPz/IdeVS0mDVQWAH61MOFF98m9T7NKnYmSl6yuRBlZv6sT1x+Fw28zuKk2zFLC0\nzNOH8ux8mEpv3QWHDyJXY2oCeWzVc7ytFZg0vTnUdSzyMkJGLifNWbeq0TcCAwEA\nAQKCAgBTjeY8VR1QR2HSiLkKvJKSeHU1Xsi+hpYOBCIqf9dHEdKPIEYyAGPaRwiX\nvomxjiU2mD/1QpSAszgwL5UPqJ/+9LK3e+sJ1BYW/N/3kTLom9Q1pbXYV/t9mwFD\n+MbZsEiRXoW0pER9v7abd2bp4gRNqfhcdhRD81jHrG5IJglj5sOq8EWlMTy5hB0i\nugz1CvpN7TqkOCkwFBX5/W/H0m8MeOSyJkU0EkZx7bRffpr5RMksVhWwsFHWAwQ8\noe6h9QgEUgAICrcIl6vNHhosN3VTxx7NuXDHCiLV3T01bjESqVIOCnoKsiQYzfcm\nwH9zFU7mvJIlIZYFCjFxI/WmQ5TYVnePY9aojPy1XCcVidIOwbVDL1fqBe9APk5X\nipPn0VKKOxdnhQWb+wOTySur03qu62FI/zGxZlxrrJ4mGk3pJ4RTA+LIujxjP2bE\n9TGohT8T5+cffFgoedzR8l3SKUbFeY9WfmiDpvATT83nP8dnb03oDdRCR6Mp3JNL\nVNuxlYnIDo0I35/kfJzmbkcvMuQDUhrn/Gt44AxYP3VAaZ7KmTQR2gZWtUW4ENyV\njOb1XILTN2njd3oOSR9omRbEiwb4vbab4iXHB6bW4+l0vgI2kh2wituxxFvms63x\nDkmvCegsxBCV8GKnK2fmZbY9OSwmen95E7V3lV5vHX9ojGL9fQKCAQEA6pSM0gUR\nTWo6wcwveANUcIXrZ6lMJNdyGMG5vrAH/Mf1RcldAYTSMR8gD5nYzwk7gLaQPd1t\navIeCtGWIYKotmjez9ArzJfNME2J7hgQlukx0Ni4khp5hDrMB4pdwMmWqVywNklI\nRPpIhhHsylM5jn7XmhNk+aUXPeaqlWDzThskM7vCeNBW3L6q8r9c7z+5QdYEnBKX\nahM30ZppR1x6AknfN1DejH2qboZ2QSqsge8xcDq7cV2F3yiVkzvliLi8h0vPDPS4\nJIZFNbS+N47LIwiHodtaJXU1EnPoHYzv1ifqjgCjzI3A9RqqwsC/fm6f0rY+3NO1\ncyb0yK6M246DZQKCAQEAyh/TvSf3iKjm1/jpYzli5pY6ITqzpsRxD/Jr6WcDKyJ8\nwJeuuyPS8WQDZeSocUjSlzWKPyyBydbBDnXE96VV3+ce0SYRmEktA9UI1sgJDqgQ\ntDquAURjGHTa1BA+dHgEPRtcnH34RiMv2rtjOGUZ7UsmPBN5Byhkt2ZCTO3rKkqQ\nB7Pxh43AlbKS9S0v7MNFrz2Km+bdX5amTSLrRapjL8zAkivF1F4cIRXOuqoS+TGK\nJVEEnG/POUBNPP4EuaRfcwv2qiViPv3Cm/AzbOtW8CGA3+8uKbOFWktsY24asdFs\nfPuyjbji02P0+Gk7Bos+Jhzpprb7lUbwA+rnVwruawKCAQEAwtTeoRDUbhfs9UlB\no8wgNdUEz/Qs6eyHN0ivnVFXUPxDGtNEDB7k9SeE2XBOQqbIZP88jiV623h8uz+Y\nEf0TY212oeOkfSkXoybDiJ00KiNE3NEjezXB97FeBGVHuPe1yrTRJkV3YNL/qVr5\n8JFvHIr9R1sUYsRbqW4MAEWlzxl71/v6aZ2gh3dVRIjsB0pF9Ahti7NlTxTnJ4+4\nIS0NqRjx7F+ed9Fn3KSRU06+OK0RwsUFKgMjTVXH5aG54vSMoVK1hW08BBoBIboc\nwsg2r8JF5IjrNnFCwzDBeMm16Yj9pzOg2CRkPy8G5PpyEfCPjdMQIxkze5C/fm57\nT1/ugQKCAQEAmFMwEYfDRtxzTCQsTzj6Tei9WjmRFodI5cm+haG6g5F0DvjBGAvB\nd2X2IZ4XS3ikLIyO6VkzXtdXmvwqOrABbcISOXDFJc1P/WlZRfRuzvrdy9XFOPM+\n8zltiQ/M4Z1pJFwwiUduCf2OnPtHpUKcLXbLE89hEIHilUydfrh21+JldUipJ40u\nrCJuGhO54GaxZ4Wrph5wiw9PuSYtxMzJ/0YBgqc2afuam1yB9a8iqWYqREEwhmzE\n8NKC4wpUqUs1G9DSThvqVKqNNVPqjZU2abX2F817LrY6M3hP8QcTWxsG9ZICWEgT\nuVfFUEiN0Tqy1apIc/nG1BMn4uvu+GwmjQKCAQBDpg6RjLbba0DCdhNwRpoooNIY\nt4+kWMLozRH3yYXGPoiFyOpxRWgy1AL5Dfi/xOI1Y0Fww9yr3DOhXkfQJ92YGGqb\nBjC5QxA2lT9TTuCLvKDVer9/IVZAtM5EEN7lmDAPRH5z7lPAlcC63V+un5e1ys0i\naE+exmzM7KOfcBHdROn2lC9XUiRgfR/u4LTnY16REYmzxAeM7L+aRFcuXsnAoME5\nEgvlQOKyOFjC/1DS4ssoZcP6lsyxYJqPKA9sv1qqyRXqLhBe1lVns7UWm+BRUxs/\n4yaLmeok1qIGMX6o9PYzWOVu/a7tSdtKdCHH6olLDqrW9ZRWpnmS2d6NgClv\n-----END RSA PRIVATE KEY-----\n"}
2026-04-13T17:49:58.450 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout SSL certificate updated
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:57] ENGINE Bus STARTING
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: from='client.14174 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:57] ENGINE Serving on http://192.168.123.100:8765
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:57] ENGINE Serving on https://192.168.123.100:7150
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:57] ENGINE Bus STARTED
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: [13/Apr/2026:17:49:57] ENGINE Client ('192.168.123.100', 59894) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: from='client.14182 -' entity='client.admin' cmd=[{"prefix": "orch certmgr generate-certificates", "module_name": "dashboard", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:58.820 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:58 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:58.846 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout SSL certificate key updated
2026-04-13T17:49:58.847 INFO:teuthology.orchestra.run.vm00.stdout:Creating initial admin user...
2026-04-13T17:49:59.406 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$A4.CuT3r166o0LghJskNau/g.WkPITUxT58X3CvAfkSLN74QtzOYC", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1776102599, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-04-13T17:49:59.406 INFO:teuthology.orchestra.run.vm00.stdout:Fetching dashboard port number...
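The `ac-user-create` reply above stores the admin credential only as a bcrypt hash (the `$2b$12$` prefix indicates bcrypt at cost factor 12) with `pwdUpdateRequired` set, so the generated password must be changed at first login. A small Python round-trip showing how such a hash is produced and verified, using the `bcrypt` package; this illustrates the hash format only (the dashboard's own implementation is not shown in this log), and the plaintext below is a placeholder, not the password from this run:

    import bcrypt

    pwd = b"example-password"                     # placeholder, not this run's password
    digest = bcrypt.hashpw(pwd, bcrypt.gensalt(rounds=12))
    assert digest.startswith(b"$2b$12$")          # same prefix/cost as the hash above
    assert bcrypt.checkpw(pwd, digest)            # the comparison performed at login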
2026-04-13T17:49:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:59 vm00 ceph-mon[51174]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:59 vm00 ceph-mon[51174]: mgrmap e12: vm00.vrvkmc(active, since 2s)
2026-04-13T17:49:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:59 vm00 ceph-mon[51174]: from='client.14186 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:49:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:59 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:59.780 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:49:59 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:49:59.811 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stdout 8443
2026-04-13T17:49:59.811 INFO:teuthology.orchestra.run.vm00.stdout:firewalld does not appear to be present
2026-04-13T17:49:59.811 INFO:teuthology.orchestra.run.vm00.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout:Ceph Dashboard is now available at:
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout: URL: https://vm00.local:8443/
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout: User: admin
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout: Password: nmo043c8zl
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:49:59.813 INFO:teuthology.orchestra.run.vm00.stdout:Saving cluster configuration to /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config directory
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout: sudo /home/ubuntu/cephtest/cephadm shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:Or, if you are only running a single cluster on this host:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout: sudo /home/ubuntu/cephtest/cephadm shell
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:Please consider enabling telemetry to help improve Ceph:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout: ceph telemetry on
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:For more information see:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout: https://docs.ceph.com/en/latest/mgr/telemetry/
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:00.250 INFO:teuthology.orchestra.run.vm00.stdout:Bootstrap complete.
2026-04-13T17:50:00.259 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout static
2026-04-13T17:50:00.266 INFO:teuthology.orchestra.run.vm00.stdout:Non-zero exit code 3 from systemctl is-active logrotate
2026-04-13T17:50:00.267 INFO:teuthology.orchestra.run.vm00.stdout:systemctl: stdout inactive
2026-04-13T17:50:00.267 INFO:teuthology.orchestra.run.vm00.stdout:Enabling the logrotate.timer service to perform daily log rotation.
2026-04-13T17:50:00.432 INFO:tasks.cephadm:Fetching config...
2026-04-13T17:50:00.432 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:50:00.432 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-04-13T17:50:00.448 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-04-13T17:50:00.448 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:50:00.448 DEBUG:teuthology.orchestra.run.vm00:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-04-13T17:50:00.513 INFO:tasks.cephadm:Fetching mon keyring...
2026-04-13T17:50:00.513 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:50:00.513 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/keyring of=/dev/stdout
2026-04-13T17:50:00.582 INFO:tasks.cephadm:Fetching pub ssh key...
2026-04-13T17:50:00.582 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:50:00.582 DEBUG:teuthology.orchestra.run.vm00:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-04-13T17:50:00.637 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-04-13T17:50:00.638 DEBUG:teuthology.orchestra.run.vm00:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXMy0CWaUrcPo/X+ObyBtOU4EGBPMV5U0Jv3lfaUz5g/YxTqwNeDUBq6PLf5zVoPVLgMyT0F2JRuEVYeWMQpR3ngOnmTTuDPc0pMCWG+/KIg5LribLKbBB88n2Lej87CI/lfeoPgBbsPkkHqo//NYve96sibE7l4D4vdqvhbWVVj8MgmkapE1d/REwxHgb6nhwVUtuTKZ8Ko+SfGCQ2ILohITanSMGWKEgBk27tyt2f7oK1LcaUeygQYF6SfnpvFhv5kLZL0GICAWEf2rLl/BKGwIuaCmcQounTLodSARvyq6G3xhswUBLPIacsRd8HahVxCqhRJUjkUCo7t0xHXEwZJzYCqc9IfuACXTEcjhfyEskSdS3xw60XEJCDdjFfkpQD+/EXuQ7JlXw130o+kl3kOmSy5Bym/Azo5iKCai97csjojywoXyn5Oht+OTYBHkPBLEt/24Ifw4sbMGqlJ4XkRROu6cm9mPmO29OMamZADjjnWlLrOjh+AxdmIDkUW8= ceph-00063a34-3761-11f1-944c-abe11cccf0ff' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-04-13T17:50:00.705 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:00 vm00 ceph-mon[51174]: from='client.14188 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:50:00.705 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:00 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/345123460' entity='client.admin' cmd={"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"} : dispatch
2026-04-13T17:50:00.705 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:00 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/650649509' entity='client.admin'
2026-04-13T17:50:00.718 INFO:teuthology.orchestra.run.vm00.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXMy0CWaUrcPo/X+ObyBtOU4EGBPMV5U0Jv3lfaUz5g/YxTqwNeDUBq6PLf5zVoPVLgMyT0F2JRuEVYeWMQpR3ngOnmTTuDPc0pMCWG+/KIg5LribLKbBB88n2Lej87CI/lfeoPgBbsPkkHqo//NYve96sibE7l4D4vdqvhbWVVj8MgmkapE1d/REwxHgb6nhwVUtuTKZ8Ko+SfGCQ2ILohITanSMGWKEgBk27tyt2f7oK1LcaUeygQYF6SfnpvFhv5kLZL0GICAWEf2rLl/BKGwIuaCmcQounTLodSARvyq6G3xhswUBLPIacsRd8HahVxCqhRJUjkUCo7t0xHXEwZJzYCqc9IfuACXTEcjhfyEskSdS3xw60XEJCDdjFfkpQD+/EXuQ7JlXw130o+kl3kOmSy5Bym/Azo5iKCai97csjojywoXyn5Oht+OTYBHkPBLEt/24Ifw4sbMGqlJ4XkRROu6cm9mPmO29OMamZADjjnWlLrOjh+AxdmIDkUW8= ceph-00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:50:00.733 DEBUG:teuthology.orchestra.run.vm01:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXMy0CWaUrcPo/X+ObyBtOU4EGBPMV5U0Jv3lfaUz5g/YxTqwNeDUBq6PLf5zVoPVLgMyT0F2JRuEVYeWMQpR3ngOnmTTuDPc0pMCWG+/KIg5LribLKbBB88n2Lej87CI/lfeoPgBbsPkkHqo//NYve96sibE7l4D4vdqvhbWVVj8MgmkapE1d/REwxHgb6nhwVUtuTKZ8Ko+SfGCQ2ILohITanSMGWKEgBk27tyt2f7oK1LcaUeygQYF6SfnpvFhv5kLZL0GICAWEf2rLl/BKGwIuaCmcQounTLodSARvyq6G3xhswUBLPIacsRd8HahVxCqhRJUjkUCo7t0xHXEwZJzYCqc9IfuACXTEcjhfyEskSdS3xw60XEJCDdjFfkpQD+/EXuQ7JlXw130o+kl3kOmSy5Bym/Azo5iKCai97csjojywoXyn5Oht+OTYBHkPBLEt/24Ifw4sbMGqlJ4XkRROu6cm9mPmO29OMamZADjjnWlLrOjh+AxdmIDkUW8= ceph-00063a34-3761-11f1-944c-abe11cccf0ff' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-04-13T17:50:00.770 INFO:teuthology.orchestra.run.vm01.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDXMy0CWaUrcPo/X+ObyBtOU4EGBPMV5U0Jv3lfaUz5g/YxTqwNeDUBq6PLf5zVoPVLgMyT0F2JRuEVYeWMQpR3ngOnmTTuDPc0pMCWG+/KIg5LribLKbBB88n2Lej87CI/lfeoPgBbsPkkHqo//NYve96sibE7l4D4vdqvhbWVVj8MgmkapE1d/REwxHgb6nhwVUtuTKZ8Ko+SfGCQ2ILohITanSMGWKEgBk27tyt2f7oK1LcaUeygQYF6SfnpvFhv5kLZL0GICAWEf2rLl/BKGwIuaCmcQounTLodSARvyq6G3xhswUBLPIacsRd8HahVxCqhRJUjkUCo7t0xHXEwZJzYCqc9IfuACXTEcjhfyEskSdS3xw60XEJCDdjFfkpQD+/EXuQ7JlXw130o+kl3kOmSy5Bym/Azo5iKCai97csjojywoXyn5Oht+OTYBHkPBLEt/24Ifw4sbMGqlJ4XkRROu6cm9mPmO29OMamZADjjnWlLrOjh+AxdmIDkUW8= ceph-00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:50:00.782 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph config set mgr mgr/cephadm/allow_ptrace true
2026-04-13T17:50:00.916 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:50:01.372 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755
2026-04-13T17:50:01.372 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch client-keyring set client.admin '*' --mode 0755
2026-04-13T17:50:01.513 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:50:01.988 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm01
2026-04-13T17:50:01.988 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:50:01.988 DEBUG:teuthology.orchestra.run.vm01:> dd of=/etc/ceph/ceph.conf
2026-04-13T17:50:02.004 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:50:02.004 DEBUG:teuthology.orchestra.run.vm01:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:02.063 INFO:tasks.cephadm:Adding host vm01 to orchestrator...
2026-04-13T17:50:02.064 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch host add vm01
2026-04-13T17:50:02.203 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3453591299' entity='client.admin'
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"} : dispatch
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm00", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm00", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-13T17:50:02.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:02.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: Deploying daemon ceph-exporter.vm00 on vm00
2026-04-13T17:50:02.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='client.14196 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:50:02.578 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:02 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='client.14198 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm01", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: mgrmap e13: vm00.vrvkmc(active, since 6s)
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-04-13T17:50:04.221 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:03 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:05.065 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:04 vm00 ceph-mon[51174]: Deploying cephadm binary to vm01
2026-04-13T17:50:05.065 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:04 vm00 ceph-mon[51174]: Deploying daemon crash.vm00 on vm00
2026-04-13T17:50:05.065 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:04 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:05.065 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:04 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:05.065 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:04 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:05.065 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:04 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:05.090 INFO:teuthology.orchestra.run.vm00.stdout:Added host 'vm01' with addr '192.168.123.101'
2026-04-13T17:50:05.163 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch host ls --format=json
2026-04-13T17:50:05.298 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:50:05.664 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:50:05.664 INFO:teuthology.orchestra.run.vm00.stdout:[{"addr": "192.168.123.100", "hostname": "vm00", "labels": [], "status": ""}, {"addr": "192.168.123.101", "hostname": "vm01", "labels": [], "status": ""}]
2026-04-13T17:50:05.719 INFO:tasks.cephadm:Setting crush tunables to default
2026-04-13T17:50:05.719 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd crush tunables default
2026-04-13T17:50:05.857 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:50:06.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:05 vm00 ceph-mon[51174]: Deploying daemon node-exporter.vm00 on vm00
2026-04-13T17:50:06.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:05 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:06.936 INFO:teuthology.orchestra.run.vm00.stderr:adjusted tunables profile to default
2026-04-13T17:50:06.990 INFO:tasks.cephadm:Adding mon.vm00 on vm00
2026-04-13T17:50:06.990 INFO:tasks.cephadm:Adding mon.vm01 on vm01
2026-04-13T17:50:06.990 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch apply mon '2;vm00:192.168.123.100=vm00;vm01:192.168.123.101=vm01'
2026-04-13T17:50:07.133 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:07.133 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:07.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:06 vm00 ceph-mon[51174]: Added host vm01
2026-04-13T17:50:07.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:06 vm00 ceph-mon[51174]: from='client.14201 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:50:07.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:06 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/4225023422' entity='client.admin' cmd={"prefix": "osd crush tunables", "profile": "default"} : dispatch
2026-04-13T17:50:07.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:06 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:07.536 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled mon update...
2026-04-13T17:50:07.582 DEBUG:teuthology.orchestra.run.vm01:mon.vm01> sudo journalctl -f -n 0 -u ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm01.service
2026-04-13T17:50:07.583 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:07.583 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:07.768 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:07.768 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:08.172 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:08.172 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:08.172 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/4225023422' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: osdmap e4: 0 total, 0 up, 0 in
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:08.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:07 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:09.221 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:09.222 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:09.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:08 vm00 ceph-mon[51174]: Deploying daemon alertmanager.vm00 on vm00
2026-04-13T17:50:09.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:08 vm00 ceph-mon[51174]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm00:192.168.123.100=vm00;vm01:192.168.123.101=vm01", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:50:09.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:08 vm00 ceph-mon[51174]: Saving service mon spec with placement vm00:192.168.123.100=vm00;vm01:192.168.123.101=vm01;count:2
2026-04-13T17:50:09.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:08 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/4289997266' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:09.366 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:09.366 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:09.782 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:09.782 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:09.782 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:10.018 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:09 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/2903091163' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:10.851 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:10.851 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:10.989 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:10.989 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:11.404 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:11.404 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:11.404 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: Generating cephadm-signed certificates for grafana_cert/grafana_key
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:11.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:11 vm00 ceph-mon[51174]: Deploying daemon grafana.vm00 on vm00
2026-04-13T17:50:12.454 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:12.454 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:12.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:12 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1047867117' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:12.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:12 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:12.581 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:12.581 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:13.032 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:13.032 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:13.032 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:13.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:13 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1442685908' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:14.125 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:14.126 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:14.262 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:14.263 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:14.678 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:14.679 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:14.679 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:15.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:14 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/3435277308' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:15.751 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:15.751 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:15.889 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:15.889 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:16.305 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:16.305 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:16.305 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:17.385 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:17.385 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:17.535 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:17.535 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:17.561 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:17 vm00 ceph-mon[51174]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:17.561 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:17 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/3709380527' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:18.005 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:18.005 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-13T17:50:18.005 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='client.? 
192.168.123.101:0/3177403258' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:18.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:18 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:19.071 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-13T17:50:19.071 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:19.208 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:19.208 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:19.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:19 vm00 ceph-mon[51174]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:19.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:19 vm00 ceph-mon[51174]: Deploying daemon prometheus.vm00 on vm00 2026-04-13T17:50:19.622 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:19.622 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-13T17:50:19.622 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1 2026-04-13T17:50:20.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:20 vm00 ceph-mon[51174]: from='client.? 
192.168.123.101:0/2481601413' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:20.687 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-13T17:50:20.687 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:20.830 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:20.830 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:21.250 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:21.250 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-13T17:50:21.250 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1 2026-04-13T17:50:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:21 vm00 ceph-mon[51174]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:21 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/2442677115' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:22.324 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
2026-04-13T17:50:22.325 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:22.456 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:22.456 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:22.879 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:22 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:22.884 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:22.884 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-13T17:50:22.884 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1 2026-04-13T17:50:23.935 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-13T17:50:23.936 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:24.003 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:23 vm00 ceph-mon[51174]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:24.003 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:23 vm00 ceph-mon[51174]: from='client.? 
192.168.123.101:0/3157746039' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:24.003 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:23 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:24.003 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:23 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:24.003 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:23 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:24.003 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:23 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch 2026-04-13T17:50:24.086 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:24.086 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-13T17:50:24.498 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:24.499 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-13T17:50:24.499 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1 2026-04-13T17:50:25.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:24 vm00 ceph-mon[51174]: from='mgr.14170 192.168.123.100:0/3925354040' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-04-13T17:50:25.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:24 vm00 ceph-mon[51174]: mgrmap e14: vm00.vrvkmc(active, since 27s) 2026-04-13T17:50:25.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:24 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/2216778586' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:25.576 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
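Each orchestrator-driven mon command above shows up twice in the mon's journal: once as cmd={...} : dispatch, and, for state-changing commands such as {"prefix": "mgr module enable", "module": "prometheus"}, again as cmd='[...]': finished once the command completes (read-only commands like "mon dump" only ever log a dispatch here). A small illustrative helper for pairing those markers in a captured journal; the regexes are heuristics tuned to the line shapes above, not an official log format:

    import re

    DISPATCH = re.compile(r"cmd=(\{.*?\}) : dispatch")
    FINISHED = re.compile(r"cmd='(\[.*?\])': finished")

    def command_lifecycle(lines):
        # Split journal lines into dispatched and finished mon commands,
        # e.g. the "mgr module enable prometheus" pair visible above.
        dispatched, finished = [], []
        for line in lines:
            m = DISPATCH.search(line)
            if m:
                dispatched.append(m.group(1))
            m = FINISHED.search(line)
            if m:
                finished.append(m.group(1))
        return dispatched, finished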
2026-04-13T17:50:25.576 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:25.710 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:25.710 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:26.130 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:26.130 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:26.130 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:26.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:26 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1104354670' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:27.183 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:27.184 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:27.314 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:27.315 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:27.760 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:27.761 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:27.761 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:28.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:27 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/68213668' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:28.812 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:28.812 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:28.950 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:28.950 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:29.366 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:29.366 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:29.366 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:29.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:29 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/3335387279' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:30.412 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:30.413 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:30.547 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:30.547 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:30.965 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:30.965 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:30.965 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:31.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:31 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1900266330' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:32.010 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:32.010 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:32.149 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:32.149 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:32.580 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:32.580 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:32.580 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:33.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:32 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/2771741496' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:33.654 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:33.654 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:33.782 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:33.782 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:34.191 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:34.191 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:34.191 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:34.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:34 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1665708555' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:35.243 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:35.243 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:35.387 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:35.387 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: Active manager daemon vm00.vrvkmc restarted
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: Activating manager daemon vm00.vrvkmc
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: osdmap e5: 0 total, 0 up, 0 in
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: mgrmap e15: vm00.vrvkmc(active, starting, since 0.00402884s)
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm00.vrvkmc", "id": "vm00.vrvkmc"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mds metadata"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: Manager daemon vm00.vrvkmc is now available
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/mirror_snapshot_schedule"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/trash_purge_schedule"} : dispatch
2026-04-13T17:50:35.392 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:35 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:35.823 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:35.823 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:35.823 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:36.262 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:50:35] ENGINE Bus STARTING
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: mgrmap e16: vm00.vrvkmc(active, since 1.01128s)
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:50:35] ENGINE Serving on http://192.168.123.100:8765
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:50:35] ENGINE Serving on https://192.168.123.100:7150
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:50:35] ENGINE Bus STARTED
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: [13/Apr/2026:17:50:35] ENGINE Client ('192.168.123.100', 32784) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1206985874' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:36.263 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:36.875 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:36.875 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:37.013 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:37.013 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-13T17:50:37.434 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:37.434 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:37.434 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:37.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:37.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:37.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"} : dispatch
2026-04-13T17:50:37.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:37.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:37 vm00 ceph-mon[51174]: mgrmap e17: vm00.vrvkmc(active, since 2s)
2026-04-13T17:50:38.507 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
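At 17:50:35 the active mgr vm00.vrvkmc restarts (mgrmap e15 "active, starting" through e17) and its embedded CherryPy server comes back up on http://192.168.123.100:8765 and https://192.168.123.100:7150. The ENGINE line about the peer dropping "the TLS connection suddenly, during handshake" is the server-side view of a client that opened a TCP connection and closed it without speaking TLS; the log does not identify that client, but a plain socket liveness probe like the following sketch would produce exactly such a message:

    import socket

    def port_is_open(host, port, timeout=1.0):
        # Connect and immediately close without completing a TLS handshake;
        # against an HTTPS endpoint the server logs a handshake EOF like the
        # one seen above from ('192.168.123.100', 32784).
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return True
        except OSError:
            return False

    port_is_open('192.168.123.100', 7150)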
2026-04-13T17:50:38.508 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:38.672 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/511063317' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:38.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:38 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-13T17:50:39.171 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:39.171 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:39.171 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm00:/etc/ceph/ceph.conf
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm01:/etc/ceph/ceph.conf
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.client.admin.keyring
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Updating vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.client.admin.keyring
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: Deploying daemon ceph-exporter.vm01 on vm01
2026-04-13T17:50:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:39 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/210018893' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:40.282 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
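The "Updating ..." records show cephadm distributing the output of `config generate-minimal-conf` and the client.admin keyring to both hosts, in two places each: the legacy /etc/ceph path and the fsid-scoped /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config path. That is why the "Inferring config" lines from vm01 switch from /etc/ceph/ceph.conf to the fsid-scoped copy starting at 17:50:38.672. A simplified sketch of that preference order (an assumption based on the two paths seen in this log, not the actual cephadm source):

    import os

    def infer_config(fsid):
        # Prefer the orchestrator-managed, fsid-scoped ceph.conf and fall
        # back to the legacy path, matching the switch in the log above.
        for path in ('/var/lib/ceph/%s/config/ceph.conf' % fsid,
                     '/etc/ceph/ceph.conf'):
            if os.path.exists(path):
                return path
        raise FileNotFoundError('no ceph.conf for fsid %s' % fsid)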
2026-04-13T17:50:40.282 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:40.433 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:40.989 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:40.989 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:40.989 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: Deploying daemon crash.vm01 on vm01
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: Deploying daemon node-exporter.vm01 on vm01
2026-04-13T17:50:41.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:41 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1311779565' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:42.054 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-13T17:50:42.054 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json
2026-04-13T17:50:42.190 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:42.613 INFO:teuthology.orchestra.run.vm01.stdout:
2026-04-13T17:50:42.613 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-13T17:50:42.613 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1
2026-04-13T17:50:43.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:42 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/3507064256' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:43.670 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
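Each daemon placed on vm01 (ceph-exporter.vm01, crash.vm01, node-exporter.vm01 above, mgr.vm01.qjjyaa and mon.vm01 below) follows the same pattern: the mgr asks the mon for a daemon-scoped key via auth get-or-create with a capability profile, regenerates a minimal conf, and only then logs "Deploying daemon X on vm01" (node-exporter holds no Ceph key, so it skips the auth step). A hand-rolled equivalent of that key bootstrap, run from a node holding an admin keyring; illustrative only, since cephadm drives this through the mgr rather than the CLI:

    import subprocess

    def bootstrap_daemon_key(entity, caps):
        # e.g. bootstrap_daemon_key('client.crash.vm01',
        #                           {'mon': 'profile crash', 'mgr': 'profile crash'})
        # builds `ceph auth get-or-create <entity> mon 'profile crash' ...`
        cmd = ['ceph', 'auth', 'get-or-create', entity]
        for daemon_type, cap in caps.items():
            cmd += [daemon_type, cap]
        return subprocess.check_output(cmd).decode()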
2026-04-13T17:50:43.671 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:43.827 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf 2026-04-13T17:50:44.302 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:44.303 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":1,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:49:05.198343Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-13T17:50:44.303 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 1 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm01.qjjyaa", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm01.qjjyaa", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr services"} : dispatch 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: Deploying daemon mgr.vm01.qjjyaa on vm01 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' 
entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:44.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:44 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/3098561543' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:45.130 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 systemd[1]: Starting Ceph mon.vm01 for 00063a34-3761-11f1-944c-abe11cccf0ff... 2026-04-13T17:50:45.358 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-13T17:50:45.358 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mon dump -f json 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 podman[56789]: 2026-04-13 17:50:45.129040512 +0000 UTC m=+0.017843057 container create 72e8ac9a9f7a4da087886c98a151f57086da4cb36d2d81dfad81a57863e8dae8 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 podman[56789]: 2026-04-13 17:50:45.179836094 +0000 UTC m=+0.068638639 container init 72e8ac9a9f7a4da087886c98a151f57086da4cb36d2d81dfad81a57863e8dae8 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8) 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 podman[56789]: 2026-04-13 17:50:45.182888168 +0000 UTC m=+0.071690724 container start 72e8ac9a9f7a4da087886c98a151f57086da4cb36d2d81dfad81a57863e8dae8 
(image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9) 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 bash[56789]: 72e8ac9a9f7a4da087886c98a151f57086da4cb36d2d81dfad81a57863e8dae8 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 podman[56789]: 2026-04-13 17:50:45.122019714 +0000 UTC m=+0.010822268 image pull 06443d8796ac19529b3ba318ec42582d9c2b2d4723de9d23b91a737b3de50367 harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 systemd[1]: Started Ceph mon.vm01 for 00063a34-3761-11f1-944c-abe11cccf0ff. 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: set uid:gid to 167:167 (ceph:ceph) 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: ceph version 20.2.0-18-g0d1a6d86d0e (0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 2 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: pidfile_write: ignore empty --pid-file 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: load: jerasure load: lrc 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: RocksDB version: 7.9.2 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Git sha 0 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Compile date 2026-04-13 11:21:09 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: DB SUMMARY 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: DB Session ID: SS8WMFXDAMLOSIWXD738 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: CURRENT file: CURRENT 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: IDENTITY file: IDENTITY 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: SST files in /var/lib/ceph/mon/ceph-vm01/store.db dir, Total Num: 0, files: 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm01/store.db: 000004.log size: 511 ; 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.error_if_exists: 0 2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.paranoid_checks: 1
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.flush_verify_memtable_count: 1
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.env: 0x562c88eb6440
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.fs: PosixFileSystem
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.info_log: 0x562c8b38f300
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_file_opening_threads: 16
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.statistics: (nil)
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.use_fsync: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_log_file_size: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_manifest_file_size: 1073741824
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.log_file_time_to_roll: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.keep_log_file_num: 1000
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.recycle_log_file_num: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.allow_fallocate: 1
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.allow_mmap_reads: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.allow_mmap_writes: 0
2026-04-13T17:50:45.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.use_direct_reads: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.create_missing_column_families: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.db_log_dir:
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.wal_dir:
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.table_cache_numshardbits: 6
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.WAL_ttl_seconds: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.WAL_size_limit_MB: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.manifest_preallocation_size: 4194304
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.is_fd_close_on_exec: 1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.advise_random_on_open: 1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.db_write_buffer_size: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.write_buffer_manager: 0x562c8b392500
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.access_hint_on_compaction_start: 1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.random_access_max_buffer_size: 1048576
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.use_adaptive_mutex: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.rate_limiter: (nil)
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.wal_recovery_mode: 2
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.enable_thread_tracking: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.enable_pipelined_write: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.unordered_write: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.allow_concurrent_memtable_write: 1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.write_thread_max_yield_usec: 100
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.write_thread_slow_yield_usec: 3
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.row_cache: None
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.wal_filter: None
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.avoid_flush_during_recovery: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.allow_ingest_behind: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.two_write_queues: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.manual_wal_flush: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.wal_compression: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.atomic_flush: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.persist_stats_to_disk: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.write_dbid_to_manifest: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.log_readahead_size: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.file_checksum_gen_factory: Unknown
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.best_efforts_recovery: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.allow_data_in_errors: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.db_host_id: __hostname__
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.enforce_single_del_contracts: true
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_background_jobs: 2
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_background_compactions: -1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_subcompactions: 1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.avoid_flush_during_shutdown: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.delayed_write_rate : 16777216
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_total_wal_size: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.stats_dump_period_sec: 600
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.stats_persist_period_sec: 600
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.stats_history_buffer_size: 1048576
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_open_files: -1
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bytes_per_sync: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.wal_bytes_per_sync: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.strict_bytes_per_sync: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_readahead_size: 0
2026-04-13T17:50:45.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_background_flushes: -1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Compression algorithms supported:
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kZSTD supported: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kXpressCompression supported: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kBZip2Compression supported: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kZSTDNotFinalCompression supported: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kLZ4Compression supported: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kZlibCompression supported: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kLZ4HCCompression supported: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: kSnappyCompression supported: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Fast CRC32 supported: Supported on x86
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: DMutex implementation: pthread_mutex_t
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm01/store.db/MANIFEST-000005
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.merge_operator:
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_filter: None
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_filter_factory: None
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.sst_partitioner_factory: None
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.memtable_factory: SkipListFactory
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.table_factory: BlockBasedTable
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x562c8b38eec0)
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: cache_index_and_filter_blocks: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: cache_index_and_filter_blocks_with_high_priority: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: pin_l0_filter_and_index_blocks_in_cache: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: pin_top_level_index_and_filter: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: index_type: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: data_block_index_type: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: index_shortening: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: data_block_hash_table_util_ratio: 0.750000
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: checksum: 4
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: no_block_cache: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_cache: 0x562c8b3858d0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_cache_name: BinnedLRUCache
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_cache_options:
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: capacity : 536870912
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: num_shard_bits : 4
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: strict_capacity_limit : 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: high_pri_pool_ratio: 0.000
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_cache_compressed: (nil)
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: persistent_cache: (nil)
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_size: 4096
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_size_deviation: 10
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_restart_interval: 16
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: index_block_restart_interval: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: metadata_block_size: 4096
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: partition_filters: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: use_delta_encoding: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: filter_policy: bloomfilter
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: whole_key_filtering: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: verify_compression: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: read_amp_bytes_per_bit: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: format_version: 5
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: enable_index_compression: 1
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: block_align: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: max_auto_readahead_size: 262144
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: prepopulate_block_cache: 0
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: initial_auto_readahead_size: 8192
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout: num_file_reads_for_auto_readahead: 2
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.write_buffer_size: 33554432
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_write_buffer_number: 2
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression: NoCompression
2026-04-13T17:50:45.392 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression: Disabled
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.prefix_extractor: nullptr
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.num_levels: 7
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.level: 32767
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.enabled: false
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.window_bits: -14
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.level: 32767
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.strategy: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.parallel_threads: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.enabled: false
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.level0_stop_writes_trigger: 36
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.target_file_size_base: 67108864
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.target_file_size_multiplier: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_base: 268435456
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_compaction_bytes: 1677721600
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.arena_block_size: 1048576
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.disable_auto_compactions: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.inplace_update_support: 0
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.inplace_update_num_locks: 10000
2026-04-13T17:50:45.393 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.memtable_whole_key_filtering: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.memtable_huge_page_size: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.bloom_locality: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.max_successive_merges: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.optimize_filters_for_hits: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.paranoid_file_checks: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.force_consistency_checks: 1
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.report_bg_io_stats: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.ttl: 2592000
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.periodic_compaction_seconds: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.preclude_last_level_data_seconds: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.preserve_internal_time_seconds: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.enable_blob_files: false
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.min_blob_size: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.blob_file_size: 268435456
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.blob_compression_type: NoCompression
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.enable_blob_garbage_collection: false
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.blob_compaction_readahead_size: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.blob_file_starting_level: 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm01/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 382bb5e9-a1ee-4127-adf2-0ad7913ccb65
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776102645215407, "job": 1, "event": "recovery_started", "wal_files": [4]}
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776102645216089, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776102645, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "382bb5e9-a1ee-4127-adf2-0ad7913ccb65", "db_session_id": "SS8WMFXDAMLOSIWXD738", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}}
2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: EVENT_LOG_v1 {"time_micros": 1776102645216148, "job": 1, "event": "recovery_finished"}
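The block above is the mon's RocksDB startup dump: every effective DBOptions and column-family option, plus EVENT_LOG_v1 JSON records for WAL recovery, one journal line each. A small Python sketch for pulling that configuration and those events back out of a captured log; the regexes are fitted to the exact line shapes above, as an illustration rather than a general RocksDB log parser:

import json
import re

# Matches e.g. "rocksdb: Options.write_buffer_size: 33554432" and
# "rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1".
OPT_RE = re.compile(r"rocksdb: Options\.([\w.\[\]]+)\s*:\s?(.*)$")
# Matches "rocksdb: EVENT_LOG_v1 {...json...}" records.
EVENT_RE = re.compile(r"rocksdb: EVENT_LOG_v1 (\{.*\})$")

def scan(lines):
    options, events = {}, []
    for line in lines:
        m = OPT_RE.search(line)
        if m:
            options[m.group(1)] = m.group(2).strip()
            continue
        m = EVENT_RE.search(line)
        if m:
            events.append(json.loads(m.group(1)))
    return options, events

# For the excerpt above this would give, e.g.,
# options["write_buffer_size"] == "33554432" and
# events[-1]["event"] == "recovery_finished".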
EVENT_LOG_v1 {"time_micros": 1776102645216148, "job": 1, "event": "recovery_finished"} 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/version_set.cc:5047] Creating manifest 10 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm01/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x562c8b3b0e00 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: DB pointer 0x562c8b4fc000 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01 does not exist in monmap, will attempt to join an existing cluster 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: ** DB Stats ** 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: ** Compaction Stats [default] ** 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.3 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.3 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.3 0.00 0.00 1 0.001 0 0 0.0 
0.0 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: ** Compaction Stats [default] ** 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.3 0.00 0.00 1 0.001 0 0 0.0 0.0 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-04-13T17:50:45.394 INFO:journalctl@ceph.mon.vm01.vm01.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: AddFile(Keys): cumulative 0, interval 0 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Cumulative compaction: 0.00 GB write, 0.26 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Interval compaction: 0.00 GB write, 0.26 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Block cache BinnedLRUCache@0x562c8b3858d0#2 capacity: 512.00 MB usage: 0.22 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 3.8e-05 secs_since: 0 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%) 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: ** File Read Latency Histogram By Level [default] ** 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: using public_addr v2:192.168.123.101:0/0 -> [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: starting mon.vm01 rank -1 at public addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] at bind addrs [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] 
mon_data /var/lib/ceph/mon/ceph-vm01 fsid 00063a34-3761-11f1-944c-abe11cccf0ff 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(???) e0 preinit fsid 00063a34-3761-11f1-944c-abe11cccf0ff 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).mds e1 new map 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).mds e1 print_map 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: e1 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: btime 2026-04-13T17:49:06:632051+0000 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: legacy client fscid: -1 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout: No filesystems configured 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 
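The "** Compaction Stats [default] **" block above is a fixed-width per-level table (Level, Files, Size, Score, and so on). A sketch of turning one row into a dict keyed by column name; note that Size prints as two tokens ("1.60 KB"), which the helper re-joins before zipping. The column handling is fitted to this exact header as an assumption, not a stable RocksDB format guarantee:

# Header copied from the stats dump above (21 columns).
HEADER = ("Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) "
          "Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) "
          "CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop "
          "Rblob(GB) Wblob(GB)").split()

def parse_row(row: str) -> dict:
    toks = row.split()
    toks[2:4] = [" ".join(toks[2:4])]  # "1.60 KB" -> single Size field
    return dict(zip(HEADER, toks))

row = parse_row("L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 2.3 "
                "0.00 0.00 1 0.001 0 0 0.0 0.0")
assert row["Files"] == "1/0" and row["Size"] == "1.60 KB"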
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/68213668' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/3335387279' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/1900266330' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/2771741496' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/1665708555' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Active manager daemon vm00.vrvkmc restarted
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Activating manager daemon vm00.vrvkmc
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: osdmap e5: 0 total, 0 up, 0 in
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mgrmap e15: vm00.vrvkmc(active, starting, since 0.00402884s)
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm00.vrvkmc", "id": "vm00.vrvkmc"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mds metadata"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Manager daemon vm00.vrvkmc is now available
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/mirror_snapshot_schedule"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm00.vrvkmc/trash_purge_schedule"} : dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: [13/Apr/2026:17:50:35] ENGINE Bus STARTING
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mgrmap e16: vm00.vrvkmc(active, since 1.01128s)
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-13T17:50:45.395 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: [13/Apr/2026:17:50:35] ENGINE Serving on http://192.168.123.100:8765
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: [13/Apr/2026:17:50:35] ENGINE Serving on https://192.168.123.100:7150
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: [13/Apr/2026:17:50:35] ENGINE Bus STARTED
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: [13/Apr/2026:17:50:35] ENGINE Client ('192.168.123.100', 32784) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/1206985874' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mgrmap e17: vm00.vrvkmc(active, since 2s)
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/511063317' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm00:/etc/ceph/ceph.conf
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm01:/etc/ceph/ceph.conf
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm00:/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm01:/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.client.admin.keyring
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Updating vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.client.admin.keyring
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Deploying daemon ceph-exporter.vm01 on vm01
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/210018893' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Deploying daemon crash.vm01 on vm01
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Deploying daemon node-exporter.vm01 on vm01
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/1311779565' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/3507064256' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm01.qjjyaa", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm01.qjjyaa", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr services"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:45.396 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: Deploying daemon mgr.vm01.qjjyaa on vm01
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: from='client.?
192.168.123.101:0/3098561543' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-04-13T17:50:45.397 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:45 vm01 ceph-mon[56805]: mon.vm01@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3 2026-04-13T17:50:45.500 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm01/config 2026-04-13T17:50:50.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: mon.vm00 calling monitor election 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: mon.vm01 calling monitor election 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: mon.vm00 is new leader, mons vm00,vm01 in quorum (ranks 0,1) 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: monmap epoch 2 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: fsid 00063a34-3761-11f1-944c-abe11cccf0ff 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: last_changed 2026-04-13T17:50:45.292138+0000 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: created 2026-04-13T17:49:05.198343+0000 2026-04-13T17:50:50.642 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: min_mon_release 20 (tentacle) 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: election_strategy: 1 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.vm00 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: 1: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: fsmap 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: osdmap e5: 0 total, 0 up, 0 in 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: mgrmap e17: vm00.vrvkmc(active, since 16s) 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: overall HEALTH_OK 2026-04-13T17:50:50.642 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: mon.vm00 calling monitor election 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: mon.vm01 calling monitor election 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: mon.vm00 is new leader, mons vm00,vm01 in quorum 
(ranks 0,1) 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: monmap epoch 2 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: fsid 00063a34-3761-11f1-944c-abe11cccf0ff 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: last_changed 2026-04-13T17:50:45.292138+0000 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: created 2026-04-13T17:49:05.198343+0000 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: min_mon_release 20 (tentacle) 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: election_strategy: 1 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: 0: [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] mon.vm00 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: 1: [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0] mon.vm01 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: fsmap 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: osdmap e5: 0 total, 0 up, 0 in 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: mgrmap e17: vm00.vrvkmc(active, since 16s) 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: overall HEALTH_OK 2026-04-13T17:50:50.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:51.927 INFO:teuthology.orchestra.run.vm01.stdout: 2026-04-13T17:50:51.927 INFO:teuthology.orchestra.run.vm01.stdout:{"epoch":2,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","modified":"2026-04-13T17:50:45.292138Z","created":"2026-04-13T17:49:05.198343Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm00","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:3300","nonce":0},{"type":"v1","addr":"192.168.123.100:6789","nonce":0}]},"addr":"192.168.123.100:6789/0","public_addr":"192.168.123.100:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm01","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:3300","nonce":0},{"type":"v1","addr":"192.168.123.101:6789","nonce":0}]},"addr":"192.168.123.101:6789/0","public_addr":"192.168.123.101:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]} 2026-04-13T17:50:51.927 INFO:teuthology.orchestra.run.vm01.stderr:dumped monmap epoch 2 2026-04-13T17:50:52.002 INFO:tasks.cephadm:Generating final ceph.conf file... 
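The JSON object just dumped is the monmap itself: epoch 2, both mons listed, and quorum [0,1] confirming vm00 and vm01 are in. For spot-checking a run like this it can be reduced to the interesting fields with a filter; a minimal sketch, assuming jq is available on the node (everything else mirrors the log):

    $ ceph mon dump -f json | jq '{epoch, quorum, mons: [.mons[].name]}'
    # -> epoch 2, quorum [0,1], mons ["vm00","vm01"] for this cluster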
2026-04-13T17:50:52.002 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph config generate-minimal-conf 2026-04-13T17:50:52.049 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Updating vm00:/etc/ceph/ceph.conf 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Updating vm01:/etc/ceph/ceph.conf 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Updating vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Updating vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Reconfiguring mon.vm00 (unknown last config time)... 
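Every ceph command teuthology issues from here on uses the same wrapper visible in the line above: `cephadm shell` starts a one-shot container from the pinned test image and runs the command inside it against the target cluster. The general shape, with the image reference and fsid from this run reduced to placeholders:

    $ sudo cephadm --image <IMAGE> shell \
          -c /etc/ceph/ceph.conf \
          -k /etc/ceph/ceph.client.admin.keyring \
          --fsid <FSID> \
          -- ceph config generate-minimal-conf

`config generate-minimal-conf` asks the mons for the stripped-down ceph.conf (just fsid and mon_host) that cephadm then distributes to every host; its output shows up in the stdout lines further below.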
2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Reconfiguring daemon mon.vm00 on vm00 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Reconfiguring mgr.vm00.vrvkmc (unknown last config time)... 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm00.vrvkmc", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr services"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Reconfiguring daemon mgr.vm00.vrvkmc on vm00 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Reconfiguring ceph-exporter.vm00 (monmap changed)... 
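Among the dispatches above, the `config get` with who=mon and key=public_network is the orchestrator reading the cluster network before regenerating mon.vm00's config. The same lookup can be done by hand; per the `public_network` entry in the config dump below, this cluster would answer with the 192.168.123.0/24 subnet:

    $ ceph config get mon public_network
    192.168.123.0/24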
2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm00", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: Reconfiguring daemon ceph-exporter.vm00 on vm00 2026-04-13T17:50:52.050 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:52 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1217278683' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-13T17:50:52.159 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:50:52.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Updating vm00:/etc/ceph/ceph.conf 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Updating vm01:/etc/ceph/ceph.conf 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Updating vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Updating vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/config/ceph.conf 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Reconfiguring mon.vm00 (unknown last config time)... 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Reconfiguring daemon mon.vm00 on vm00 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Reconfiguring mgr.vm00.vrvkmc (unknown last config time)... 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm00.vrvkmc", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr services"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Reconfiguring daemon mgr.vm00.vrvkmc on vm00 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Reconfiguring ceph-exporter.vm00 (monmap changed)... 
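The recurring `auth get-or-create` dispatches are cephadm minting, or re-fetching, a dedicated cephx key for each daemon it touches, one capability string per subsystem. The CLI equivalent of the mgr key handled above:

    $ ceph auth get-or-create mgr.vm00.vrvkmc \
          mon 'profile mgr' osd 'allow *' mds 'allow *'

The call is idempotent as long as the caps match: if the entity already exists it simply returns the existing key, which is why these reconfiguration passes can re-issue it safely.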
2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm00", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: Reconfiguring daemon ceph-exporter.vm00 on vm00
2026-04-13T17:50:52.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:52 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/1217278683' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-13T17:50:52.540 INFO:teuthology.orchestra.run.vm00.stdout:# minimal ceph.conf for 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:50:52.540 INFO:teuthology.orchestra.run.vm00.stdout:[global]
2026-04-13T17:50:52.540 INFO:teuthology.orchestra.run.vm00.stdout: fsid = 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:50:52.540 INFO:teuthology.orchestra.run.vm00.stdout: mon_host = [v2:192.168.123.100:3300/0,v1:192.168.123.100:6789/0] [v2:192.168.123.101:3300/0,v1:192.168.123.101:6789/0]
2026-04-13T17:50:52.597 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
2026-04-13T17:50:52.597 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:50:52.597 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.conf
2026-04-13T17:50:52.627 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:50:52.627 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:52.692 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:50:52.692 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.conf
2026-04-13T17:50:52.722 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:50:52.722 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-13T17:50:52.788 DEBUG:tasks.cephadm:set 0 configs
2026-04-13T17:50:52.789 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph config dump
2026-04-13T17:50:53.111 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:WHO MASK LEVEL OPTION VALUE RO
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global dev auth_debug true
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global basic container_image harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132 *
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global dev debug_asserts_on_shutdown true
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global basic log_to_file true
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global basic log_to_journald false
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global basic log_to_stderr false
2026-04-13T17:50:53.501 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_allow_pool_delete true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_clock_drift_allowed 1.000000
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_cluster_log_to_file true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_max_pg_per_osd 10000
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_pg_warn_max_object_skew 0.000000
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_warn_on_crush_straw_calc_version_zero false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_warn_on_legacy_crush_tunables false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_warn_on_osd_down_out_interval_zero false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global dev mon_warn_on_pool_pg_num_not_power_of_two false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced mon_warn_on_too_few_osds false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global dev ms_die_on_bug true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global dev ms_die_on_old_message true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced osd_pool_default_erasure_code_profile plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced osd_pool_default_pg_autoscale_mode off
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:global advanced public_network 192.168.123.0/24 *
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced auth_allow_insecure_global_id_reclaim false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced auth_mon_ticket_ttl 660.000000
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced auth_service_ticket_ttl 240.000000
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced debug_mon 20/20
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced debug_ms 1/1
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced debug_paxos 20/20
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_data_avail_warn 5
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_mgr_mkfs_grace 240
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon dev mon_osd_prime_pg_temp true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_osd_reporter_subtree_level osd
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_reweight_min_bytes_per_osd 10
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_reweight_min_pgs_per_osd 4
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim_allowed false
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced debug_mgr 20/20
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced debug_ms 1/1
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mgr/cephadm/allow_ptrace true *
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mgr/cephadm/container_init True *
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mgr/cephadm/migration_current 7 *
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mgr/dashboard/GRAFANA_API_SSL_VERIFY false *
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mgr/dashboard/ssl_server_port 8443 *
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mgr/orchestrator/orchestrator cephadm
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mon_reweight_min_bytes_per_osd 10
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:mgr advanced mon_reweight_min_pgs_per_osd 4
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:osd dev bdev_debug_aio true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced debug_ms 1/1
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced debug_osd 20/20
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_misdirected_ops true
2026-04-13T17:50:53.502 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_op_order true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_pg_log_writeout true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_shutdown true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_verify_cached_snaps true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_verify_missing_on_start true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd dev osd_debug_verify_stray_on_activate true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_deep_scrub_update_digest_min_age 30
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd basic osd_mclock_iops_capacity_threshold_hdd 49000.000000
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_mclock_profile high_recovery_ops
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_memory_target_autotune true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_op_queue debug_random *
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_op_queue_cut_off debug_random *
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_recover_clone_overlap true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_recovery_max_chunk 1048576
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_scrub_load_threshold 5.000000
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_scrub_max_interval 600.000000
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:osd advanced osd_shutdown_pgref_assert true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:client.rgw advanced rgw_cache_enabled true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:client.rgw advanced rgw_enable_ops_log true
2026-04-13T17:50:53.503 INFO:teuthology.orchestra.run.vm00.stdout:client.rgw advanced rgw_enable_usage_log true
2026-04-13T17:50:53.546
INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.546 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.546 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: Reconfiguring crash.vm00 (monmap changed)... 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: Reconfiguring daemon crash.vm00 on vm00 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/516769588' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: Reconfiguring alertmanager.vm00 deps ['mgr.vm00.vrvkmc', 'secure_monitoring_stack:False'] -> ['alertmanager.vm00', 'mgr.vm00.vrvkmc', 'mgr.vm01.qjjyaa', 'secure_monitoring_stack:False'] (diff {'mgr.vm01.qjjyaa', 'alertmanager.vm00'}) 2026-04-13T17:50:53.547 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:53 vm00 ceph-mon[51174]: Reconfiguring daemon alertmanager.vm00 on vm00 2026-04-13T17:50:53.572 INFO:tasks.cephadm:Deploying OSDs... 2026-04-13T17:50:53.572 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-04-13T17:50:53.572 DEBUG:teuthology.orchestra.run.vm00:> dd if=/scratch_devs of=/dev/stdout 2026-04-13T17:50:53.599 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-13T17:50:53.600 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/nvme0n1 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: Reconfiguring crash.vm00 (monmap changed)... 
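With the cluster configuration settled, the task turns to OSDs: it reads the scratch-device list from /scratch_devs and then, in the output that follows, runs the same three probes against each /dev/nvme*n1 on both hosts. A condensed sketch of that per-device check, assembled from the commands in the log:

    for dev in $(cat /scratch_devs); do
        stat "$dev"                                  # device node exists and is a block special file
        sudo dd if="$dev" of=/dev/null count=1       # first 512-byte sector is readable
        ! mount | grep -v devtmpfs | grep -q "$dev"  # device is not mounted anywhere
    done

The probes guard against handing the orchestrator a device that is missing, unreadable, or already in use.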
2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm00", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: Reconfiguring daemon crash.vm00 on vm00 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/516769588' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: Reconfiguring alertmanager.vm00 deps ['mgr.vm00.vrvkmc', 'secure_monitoring_stack:False'] -> ['alertmanager.vm00', 'mgr.vm00.vrvkmc', 'mgr.vm01.qjjyaa', 'secure_monitoring_stack:False'] (diff {'mgr.vm01.qjjyaa', 'alertmanager.vm00'}) 2026-04-13T17:50:53.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:53 vm01 ceph-mon[56805]: Reconfiguring daemon alertmanager.vm00 on vm00 2026-04-13T17:50:53.660 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/nvme0n1 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 993 Links: 1 Device type: 103,1 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:50:02.722044624 +0000 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:33.058139302 +0000 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:33.058139302 +0000 2026-04-13T17:50:53.661 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:32.119141418 +0000 2026-04-13T17:50:53.661 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-04-13T17:50:53.731 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-04-13T17:50:53.731 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-04-13T17:50:53.731 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000161132 s, 3.2 MB/s 2026-04-13T17:50:53.732 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-04-13T17:50:53.791 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/nvme1n1 2026-04-13T17:50:53.850 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/nvme1n1 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 1005 Links: 1 Device type: 103,2 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:50:02.758045128 +0000 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:33.464135391 +0000 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:33.464135391 +0000 2026-04-13T17:50:53.851 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:32.263141722 +0000 2026-04-13T17:50:53.851 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-04-13T17:50:53.920 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-04-13T17:50:53.920 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-04-13T17:50:53.920 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000141965 s, 3.6 MB/s 2026-04-13T17:50:53.921 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-04-13T17:50:53.978 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/nvme2n1 2026-04-13T17:50:54.035 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/nvme2n1 2026-04-13T17:50:54.035 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:54.035 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 1016 Links: 1 Device type: 103,4 2026-04-13T17:50:54.035 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:54.035 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:54.035 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:50:02.789045561 +0000 2026-04-13T17:50:54.036 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:33.888133405 +0000 2026-04-13T17:50:54.036 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:33.888133405 +0000 2026-04-13T17:50:54.036 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:32.415142043 +0000 2026-04-13T17:50:54.036 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-04-13T17:50:54.103 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-04-13T17:50:54.103 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-04-13T17:50:54.103 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000170248 s, 3.0 MB/s 2026-04-13T17:50:54.104 DEBUG:teuthology.orchestra.run.vm00:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-04-13T17:50:54.162 DEBUG:teuthology.orchestra.run.vm00:> stat /dev/nvme3n1 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout: File: /dev/nvme3n1 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout:Device: 6h/6d Inode: 1029 Links: 1 Device type: 103,6 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout:Access: 2026-04-13 17:50:02.819045981 +0000 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout:Modify: 2026-04-13 17:48:34.296134268 +0000 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout:Change: 2026-04-13 17:48:34.296134268 +0000 2026-04-13T17:50:54.234 INFO:teuthology.orchestra.run.vm00.stdout: Birth: 2026-04-13 17:48:32.553142334 +0000 2026-04-13T17:50:54.234 DEBUG:teuthology.orchestra.run.vm00:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-04-13T17:50:54.303 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records in 2026-04-13T17:50:54.303 INFO:teuthology.orchestra.run.vm00.stderr:1+0 records out 2026-04-13T17:50:54.303 INFO:teuthology.orchestra.run.vm00.stderr:512 bytes copied, 0.000178274 s, 2.9 MB/s 2026-04-13T17:50:54.307 DEBUG:teuthology.orchestra.run.vm00:> ! mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-04-13T17:50:54.366 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-04-13T17:50:54.366 DEBUG:teuthology.orchestra.run.vm01:> dd if=/scratch_devs of=/dev/stdout 2026-04-13T17:50:54.386 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-13T17:50:54.386 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme0n1 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1938730590' entity='client.admin' cmd={"prefix": "config dump"} : dispatch 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: Reconfiguring grafana.vm00 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm00', 'secure_monitoring_stack:False'] (diff {'prometheus.vm00'}) 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-04-13T17:50:54.442 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:54 vm00 ceph-mon[51174]: Reconfiguring daemon grafana.vm00 on vm00 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme0n1 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 986 Links: 1 Device type: 103,0 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:50:37.730064775 +0000 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:36.271805813 +0000 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:36.271805813 +0000 2026-04-13T17:50:54.445 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:35.396803932 +0000 2026-04-13T17:50:54.446 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-04-13T17:50:54.518 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-13T17:50:54.518 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-13T17:50:54.518 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.00022455 s, 2.3 MB/s 2026-04-13T17:50:54.519 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1 2026-04-13T17:50:54.576 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme1n1 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1938730590' entity='client.admin' cmd={"prefix": "config dump"} : dispatch 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: Reconfiguring grafana.vm00 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm00', 'secure_monitoring_stack:False'] (diff {'prometheus.vm00'}) 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-04-13T17:50:54.622 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:54 vm01 ceph-mon[56805]: Reconfiguring daemon grafana.vm00 on vm00 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme1n1 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 998 Links: 1 Device type: 103,2 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:50:37.761064788 +0000 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:36.670806670 +0000 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:36.670806670 +0000 2026-04-13T17:50:54.636 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:35.525804210 +0000 2026-04-13T17:50:54.636 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1 2026-04-13T17:50:54.699 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-13T17:50:54.699 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-13T17:50:54.699 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000184095 s, 2.8 MB/s 2026-04-13T17:50:54.700 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-04-13T17:50:54.755 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme2n1 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme2n1 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 1011 Links: 1 Device type: 103,4 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:50:37.794064802 +0000 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:37.075807541 +0000 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:37.075807541 +0000 2026-04-13T17:50:54.811 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:35.659804498 +0000 2026-04-13T17:50:54.811 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-04-13T17:50:54.882 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-13T17:50:54.882 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-13T17:50:54.882 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000185428 s, 2.8 MB/s 2026-04-13T17:50:54.883 DEBUG:teuthology.orchestra.run.vm01:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-04-13T17:50:54.942 DEBUG:teuthology.orchestra.run.vm01:> stat /dev/nvme3n1 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout: File: /dev/nvme3n1 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout:Device: 6h/6d Inode: 1023 Links: 1 Device type: 103,6 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout:Access: 2026-04-13 17:50:37.830064817 +0000 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout:Modify: 2026-04-13 17:48:37.493808439 +0000 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout:Change: 2026-04-13 17:48:37.493808439 +0000 2026-04-13T17:50:55.003 INFO:teuthology.orchestra.run.vm01.stdout: Birth: 2026-04-13 17:48:35.797804794 +0000 2026-04-13T17:50:55.003 DEBUG:teuthology.orchestra.run.vm01:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-04-13T17:50:55.071 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records in 2026-04-13T17:50:55.071 INFO:teuthology.orchestra.run.vm01.stderr:1+0 records out 2026-04-13T17:50:55.071 INFO:teuthology.orchestra.run.vm01.stderr:512 bytes copied, 0.000168507 s, 3.0 MB/s 2026-04-13T17:50:55.073 DEBUG:teuthology.orchestra.run.vm01:> ! mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-04-13T17:50:55.133 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch apply osd --all-available-devices 2026-04-13T17:50:55.311 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm01/config 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: Standby manager daemon vm01.qjjyaa started 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm01.qjjyaa/crt"} : dispatch 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm01.qjjyaa/key"} : dispatch 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: from='mgr.? 
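Note: before applying the OSD spec, the task walks /dev/nvme0n1 through /dev/nvme3n1 on each host with the stat / dd / mount sequence visible above: the device node must exist, a single 512-byte sector must be readable, and the device must not appear in the mount table (devtmpfs lines excluded). A rough standalone rendering in Python; teuthology actually runs these commands over SSH, and the helper name here is invented:

    import subprocess

    def device_is_usable(dev):
        # Device node must exist and be stat-able.
        subprocess.run(["stat", dev], check=True, capture_output=True)
        # One sector must be readable (catches broken nvme-loop devices).
        subprocess.run(["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"],
                       check=True, capture_output=True)
        # Mirror `! mount | grep -v devtmpfs | grep -q <dev>`.
        mounts = subprocess.run(["mount"], check=True, capture_output=True,
                                text=True).stdout
        return not any(dev in line for line in mounts.splitlines()
                       if "devtmpfs" not in line)

    for n in range(4):
        assert device_is_usable(f"/dev/nvme{n}n1")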
192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: Reconfiguring prometheus.vm00 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm00', 'mgr.vm00.vrvkmc', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm00', 'ceph-exporter.vm01', 'mgr.vm00.vrvkmc', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm01'}) 2026-04-13T17:50:55.505 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:55 vm00 ceph-mon[51174]: Reconfiguring daemon prometheus.vm00 on vm00 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: Standby manager daemon vm01.qjjyaa started 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm01.qjjyaa/crt"} : dispatch 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm01.qjjyaa/key"} : dispatch 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: from='mgr.? 192.168.123.101:0/3336971577' entity='mgr.vm01.qjjyaa' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: Reconfiguring prometheus.vm00 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm00', 'mgr.vm00.vrvkmc', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm00', 'ceph-exporter.vm01', 'mgr.vm00.vrvkmc', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm01'}) 2026-04-13T17:50:55.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:55 vm01 ceph-mon[56805]: Reconfiguring daemon prometheus.vm00 on vm00 2026-04-13T17:50:55.709 INFO:teuthology.orchestra.run.vm01.stdout:Scheduled osd.all-available-devices update... 2026-04-13T17:50:55.761 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
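Note: "Saving service osd.all-available-devices spec with placement *" is the orchestrator persisting the spec implied by `ceph orch apply osd --all-available-devices`: target every managed host and consume every available device. The approximate shape of that spec, rendered as a Python dict for readability (the orchestrator stores it as a YAML service spec; the field layout here is a best-effort reconstruction, not a dump from this run):

    spec = {
        "service_type": "osd",
        "service_id": "all-available-devices",
        "placement": {"host_pattern": "*"},
        "spec": {"data_devices": {"all": True}},
    }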
2026-04-13T17:50:55.762 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:50:55.918 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:50:56.313 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:50:56.362 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: mgrmap e18: vm00.vrvkmc(active, since 21s), standbys: vm01.qjjyaa 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm01.qjjyaa", "id": "vm01.qjjyaa"} : dispatch 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='client.24099 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: Marking host: vm00 for OSDSpec preview refresh. 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: Marking host: vm01 for OSDSpec preview refresh. 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: Saving service osd.all-available-devices spec with placement * 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: Reconfiguring ceph-exporter.vm01 (monmap changed)... 
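Note: "Waiting for 8 OSDs to come up..." starts the poll loop visible above: tasks.cephadm repeatedly shells out for `ceph osd stat -f json` (through the cephadm shell wrapper, elided here) until the counts reach the target, 4 devices on each of the 2 hosts. A condensed sketch of the loop; the timeout and interval values are assumptions:

    import json, subprocess, time

    def wait_for_osds(want, timeout=900, interval=1):
        deadline = time.time() + timeout
        while time.time() < deadline:
            out = subprocess.run(["ceph", "osd", "stat", "-f", "json"],
                                 check=True, capture_output=True,
                                 text=True).stdout
            stat = json.loads(out)
            # Wait for the OSDs to exist and to come up.
            if stat["num_osds"] >= want and stat["num_up_osds"] >= want:
                return stat
            time.sleep(interval)
        raise TimeoutError(f"fewer than {want} OSDs up after {timeout}s")

    wait_for_osds(8)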
2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: Reconfiguring daemon ceph-exporter.vm01 on vm01 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-13T17:50:56.575 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:56 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: mgrmap e18: vm00.vrvkmc(active, since 21s), standbys: vm01.qjjyaa 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr metadata", "who": "vm01.qjjyaa", "id": "vm01.qjjyaa"} : dispatch 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='client.24099 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: Marking host: vm00 for OSDSpec preview refresh. 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: Marking host: vm01 for OSDSpec preview refresh. 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: Saving service osd.all-available-devices spec with placement * 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: Reconfiguring ceph-exporter.vm01 (monmap changed)... 
2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm01", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: Reconfiguring daemon ceph-exporter.vm01 on vm01 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm01", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-13T17:50:56.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:56 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:57.363 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: Reconfiguring crash.vm01 (monmap changed)... 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: Reconfiguring daemon crash.vm01 on vm01 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3744811767' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: Reconfiguring mgr.vm01.qjjyaa (monmap changed)... 
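Note: the `auth get-or-create` dispatches above are cephadm minting (or re-reading) per-daemon keyrings idempotently while it reconfigures ceph-exporter and crash on vm01; repeated calls return the same key. An equivalent direct CLI call for the crash.vm01 example from the log, wrapped in a small helper whose name is invented:

    import subprocess

    def get_or_create_keyring(entity, caps):
        cmd = ["ceph", "auth", "get-or-create", entity]
        for daemon_type, cap in caps:
            cmd += [daemon_type, cap]
        # Creates the entity on first use, then just returns the keyring.
        return subprocess.run(cmd, check=True, capture_output=True,
                              text=True).stdout

    print(get_or_create_keyring("client.crash.vm01",
                                [("mon", "profile crash"),
                                 ("mgr", "profile crash")]))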
2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm01.qjjyaa", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr services"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: Reconfiguring daemon mgr.vm01.qjjyaa on vm01 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: Reconfiguring mon.vm01 (monmap changed)... 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: Reconfiguring daemon mon.vm01 on vm01 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm00.local:3000"} : dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm00.local:3000"}]: dispatch 2026-04-13T17:50:57.510 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm00.local:9095"} : dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm00.local:9095"}]: dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:50:57.511 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:57 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-13T17:50:57.538 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:50:57.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: Reconfiguring crash.vm01 (monmap changed)... 2026-04-13T17:50:57.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: Reconfiguring daemon crash.vm01 on vm01 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/3744811767' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: Reconfiguring mgr.vm01.qjjyaa (monmap changed)... 
2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm01.qjjyaa", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mgr services"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: Reconfiguring daemon mgr.vm01.qjjyaa on vm01 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: Reconfiguring mon.vm01 (monmap changed)... 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: Reconfiguring daemon mon.vm01 on vm01 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm00.local:9093"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm00.local:3000"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm00.local:3000"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm00.local:9095"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm00.local:9095"}]: dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:50:57.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:57 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-13T17:50:57.930 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:50:57.988 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: Certificate for "grafana_cert (vm00)" is still valid for 1094 days. 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/2402461385' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-13T17:50:58.961 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-13T17:50:58.962 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:58.962 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-13T17:50:58.962 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:58 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:58.989 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:50:59.124 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: Certificate for "grafana_cert (vm00)" is still valid for 1094 days. 
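Note: 'Certificate for "grafana_cert (vm00)" is still valid for 1094 days' is cephadm checking the expiry of a certificate it keeps in the mon config-key store (the mgr/dashboard/crt fetches earlier in the log read from the same store). The same arithmetic as a standalone sketch with the `cryptography` package; the PEM file path is hypothetical:

    import datetime
    from cryptography import x509

    pem = open("grafana_cert.pem", "rb").read()  # hypothetical path
    cert = x509.load_pem_x509_certificate(pem)
    # not_valid_after_utc needs cryptography >= 42; older releases expose
    # a naive not_valid_after instead.
    left = cert.not_valid_after_utc - datetime.datetime.now(datetime.timezone.utc)
    print(f"Certificate is still valid for {left.days} days.")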
2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2402461385' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-13T17:50:59.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:58 vm01 ceph-mon[56805]: 
from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:50:59.504 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:50:59.563 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-13T17:51:00.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:59 vm01 ceph-mon[56805]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:00.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:50:59 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/4179335631' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:00.267 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:59 vm00 ceph-mon[51174]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:00.267 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:50:59 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/4179335631' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:00.563 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:00.713 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:01.122 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/2449768731' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "3331b8e8-674c-47c3-bfcd-df00c60d9807"} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "3331b8e8-674c-47c3-bfcd-df00c60d9807"} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3331b8e8-674c-47c3-bfcd-df00c60d9807"}]': finished 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: osdmap e6: 1 total, 0 up, 1 in 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1595193181' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2"} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/1595193181' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2"}]': finished 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: osdmap e7: 2 total, 0 up, 2 in 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1889616746' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:01.122 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:00 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1456142718' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/2449768731' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "3331b8e8-674c-47c3-bfcd-df00c60d9807"} : dispatch 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "3331b8e8-674c-47c3-bfcd-df00c60d9807"} : dispatch 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3331b8e8-674c-47c3-bfcd-df00c60d9807"}]': finished 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: osdmap e6: 1 total, 0 up, 1 in 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1595193181' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2"} : dispatch 2026-04-13T17:51:01.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1595193181' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2"}]': finished 2026-04-13T17:51:01.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: osdmap e7: 2 total, 0 up, 2 in 2026-04-13T17:51:01.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:01.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:01.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? 
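Note: each OSD materialising above follows the ceph-volume bootstrap handshake: a fresh UUID is registered with `ceph osd new`, which allocates the next free OSD id (osdmap epochs e6 and e7 record 1 then 2 OSDs total), and `mon getmap` fetches the monmap for the subsequent mkfs. A direct-CLI sketch of those two steps, authenticating as client.bootstrap-osd from its standard keyring location:

    import subprocess, uuid

    BOOT = ["ceph", "--name", "client.bootstrap-osd",
            "--keyring", "/var/lib/ceph/bootstrap-osd/ceph.keyring"]

    osd_uuid = str(uuid.uuid4())
    # `osd new` registers the UUID and prints the allocated OSD id.
    osd_id = subprocess.run(BOOT + ["osd", "new", osd_uuid], check=True,
                            capture_output=True, text=True).stdout.strip()
    # The monmap is needed for the OSD's mkfs step.
    subprocess.run(BOOT + ["mon", "getmap", "-o", "/tmp/monmap"], check=True)
    print(f"allocated osd.{osd_id} for {osd_uuid}")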
192.168.123.101:0/1889616746' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:01.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:00 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1456142718' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:01.186 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1776102660,"num_remapped_pgs":0} 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/552926174' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/3100408428' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "341364f2-8742-4fac-bbbd-c5a789f2cf2c"} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "341364f2-8742-4fac-bbbd-c5a789f2cf2c"} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "341364f2-8742-4fac-bbbd-c5a789f2cf2c"}]': finished 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: osdmap e8: 3 total, 0 up, 3 in 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/292004907' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f5faee95-1805-4c8c-bfbc-2faa1b776d58"} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/292004907' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f5faee95-1805-4c8c-bfbc-2faa1b776d58"}]': finished 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: osdmap e9: 4 total, 0 up, 4 in 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:01.967 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:01 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/552926174' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/3100408428' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "341364f2-8742-4fac-bbbd-c5a789f2cf2c"} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "341364f2-8742-4fac-bbbd-c5a789f2cf2c"} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "341364f2-8742-4fac-bbbd-c5a789f2cf2c"}]': finished 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: osdmap e8: 3 total, 0 up, 3 in 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/292004907' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "f5faee95-1805-4c8c-bfbc-2faa1b776d58"} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/292004907' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "f5faee95-1805-4c8c-bfbc-2faa1b776d58"}]': finished 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: osdmap e9: 4 total, 0 up, 4 in 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:02.133 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:01 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:02.187 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:02.330 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:02.716 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:02.775 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1776102661,"num_remapped_pgs":0} 2026-04-13T17:51:02.877 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:02 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/771547145' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:02.877 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:02 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2659683261' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:02.877 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:02 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1816360041' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:03.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:02 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/771547145' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:03.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:02 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2659683261' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:03.019 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:02 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/1816360041' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:03.775 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/630958298' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "8540763b-ebe2-4b6e-aa18-9e97c56c3668"} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "8540763b-ebe2-4b6e-aa18-9e97c56c3668"} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8540763b-ebe2-4b6e-aa18-9e97c56c3668"}]': finished 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: osdmap e10: 5 total, 0 up, 5 in 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2269557865' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "05752c92-abcf-43e4-8022-6bfac80c889f"} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/2269557865' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "05752c92-abcf-43e4-8022-6bfac80c889f"}]': finished 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: osdmap e11: 6 total, 0 up, 6 in 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/3831582209' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:03.882 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:03 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/604482882' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:03.936 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/630958298' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "8540763b-ebe2-4b6e-aa18-9e97c56c3668"} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "8540763b-ebe2-4b6e-aa18-9e97c56c3668"} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8540763b-ebe2-4b6e-aa18-9e97c56c3668"}]': finished 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: osdmap e10: 5 total, 0 up, 5 in 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2269557865' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "05752c92-abcf-43e4-8022-6bfac80c889f"} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2269557865' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "05752c92-abcf-43e4-8022-6bfac80c889f"}]': finished 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: osdmap e11: 6 total, 0 up, 6 in 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? 
192.168.123.101:0/3831582209' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:04.133 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:03 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/604482882' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:04.329 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:04.400 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":12,"num_osds":7,"num_up_osds":0,"osd_up_since":0,"num_in_osds":7,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/2061131688' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "72f6cb1d-edb6-4bf9-be69-8175437241a2"} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "72f6cb1d-edb6-4bf9-be69-8175437241a2"} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "72f6cb1d-edb6-4bf9-be69-8175437241a2"}]': finished 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: osdmap e12: 7 total, 0 up, 7 in 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3631733761' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/1053558470' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "d4157b61-d78c-4dc1-8f66-e17bf55003cd"} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1053558470' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d4157b61-d78c-4dc1-8f66-e17bf55003cd"}]': finished 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: osdmap e13: 8 total, 0 up, 8 in 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:04.957 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:04 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/1936413756' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:05.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/2061131688' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "72f6cb1d-edb6-4bf9-be69-8175437241a2"} : dispatch 2026-04-13T17:51:05.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "72f6cb1d-edb6-4bf9-be69-8175437241a2"} : dispatch 2026-04-13T17:51:05.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "72f6cb1d-edb6-4bf9-be69-8175437241a2"}]': finished 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: osdmap e12: 7 total, 0 up, 7 in 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/3631733761' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1053558470' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "d4157b61-d78c-4dc1-8f66-e17bf55003cd"} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/1053558470' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "d4157b61-d78c-4dc1-8f66-e17bf55003cd"}]': finished 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: osdmap e13: 8 total, 0 up, 8 in 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:05.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:04 vm01 ceph-mon[56805]: from='client.? 
192.168.123.101:0/1936413756' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:05.401 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:05.539 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:05.921 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:05.986 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:06.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:05 vm00 ceph-mon[51174]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:06.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:05 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1849876281' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:06.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:05 vm01 ceph-mon[56805]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:06.141 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:05 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1849876281' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-13T17:51:06.987 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:07.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:06 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2962000233' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:07.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:06 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-13T17:51:07.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:06 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:07.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:06 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-13T17:51:07.009 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:06 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:07.115 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:07.158 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:06 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/2962000233' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:07.158 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:06 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-13T17:51:07.158 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:06 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:07.158 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:06 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch 2026-04-13T17:51:07.158 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:06 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:07.495 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:07.614 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:08.055 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:07 vm01 ceph-mon[56805]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:08.055 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:07 vm01 ceph-mon[56805]: Deploying daemon osd.0 on vm01 2026-04-13T17:51:08.056 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:07 vm01 ceph-mon[56805]: Deploying daemon osd.1 on vm00 2026-04-13T17:51:08.056 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:07 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2436564693' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:08.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:07 vm00 ceph-mon[51174]: pgmap v17: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:08.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:07 vm00 ceph-mon[51174]: Deploying daemon osd.0 on vm01 2026-04-13T17:51:08.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:07 vm00 ceph-mon[51174]: Deploying daemon osd.1 on vm00 2026-04-13T17:51:08.266 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:07 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/2436564693' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:08.616 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:08.766 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:09.137 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:09.191 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/156849945' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='osd.0 [v2:192.168.123.101:6800/25951616,v1:192.168.123.101:6801/25951616]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} : dispatch 2026-04-13T17:51:10.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:09 vm00 ceph-mon[51174]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' 
entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: pgmap v18: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/156849945' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='osd.0 [v2:192.168.123.101:6800/25951616,v1:192.168.123.101:6801/25951616]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]} : dispatch 2026-04-13T17:51:10.094 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:09 vm01 ceph-mon[56805]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]} : dispatch 2026-04-13T17:51:10.192 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:10.364 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:10.848 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:10.907 
INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":14,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: Deploying daemon osd.2 on vm01 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: Deploying daemon osd.3 on vm00 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: osdmap e14: 8 total, 0 up, 8 in 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='osd.0 [v2:192.168.123.101:6800/25951616,v1:192.168.123.101:6801/25951616]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : 
dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:11.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:10 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2229197999' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: Deploying daemon osd.2 on vm01 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: Deploying daemon osd.3 on vm00 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: osdmap e14: 8 total, 0 up, 8 in 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='osd.0 [v2:192.168.123.101:6800/25951616,v1:192.168.123.101:6801/25951616]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' 
entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:11.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:10 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2229197999' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:11.907 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:12.070 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: osdmap e15: 8 total, 0 up, 8 in 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 
ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:12.153 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:12.154 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:12.154 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:11 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: osdmap e15: 8 total, 0 up, 8 in 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:12.202 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-13T17:51:12.202 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:11 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-13T17:51:12.479 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:12.542 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":16,"num_osds":8,"num_up_osds":2,"osd_up_since":1776102672,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: purged_snaps scrub starts 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: purged_snaps scrub ok 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: purged_snaps scrub starts 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: purged_snaps scrub ok 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1' 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: osd.0 [v2:192.168.123.101:6800/25951616,v1:192.168.123.101:6801/25951616] boot 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304] boot 2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: osdmap e16: 8 total, 2 up, 8 in 
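The DEBUG/stdout pairs above show the harness shelling out to sudo /home/ubuntu/cephtest/cephadm --image ... shell ... -- ceph osd stat -f json roughly once per second and reading back the map summary, whose num_up_osds has just moved from 0 to 2 as osd.0 and osd.1 boot (osdmap e16: 8 total, 2 up, 8 in). A minimal sketch of that kind of wait loop, reusing the exact command line and JSON keys visible in the log; the helper names (osd_stat, wait_for_osds_up) and the timeout are hypothetical illustrations, not teuthology's actual implementation:

import json
import subprocess
import time

# Values copied verbatim from the command lines logged above.
CEPHADM = "/home/ubuntu/cephtest/cephadm"
IMAGE = ("harbor.clyso.com/custom-ceph/ceph/ceph:"
         "sse-s3-kmip-preview-not-for-production-4")
FSID = "00063a34-3761-11f1-944c-abe11cccf0ff"

def osd_stat():
    # Run `ceph osd stat -f json` inside a cephadm shell and parse the
    # JSON summary, e.g. {"epoch":16,"num_osds":8,"num_up_osds":2,...}.
    out = subprocess.check_output([
        "sudo", CEPHADM, "--image", IMAGE, "shell",
        "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", FSID, "--",
        "ceph", "osd", "stat", "-f", "json",
    ])
    return json.loads(out)

def wait_for_osds_up(timeout=900, interval=1):
    # Poll, as the run above appears to, until every OSD in the map is up.
    deadline = time.time() + timeout
    while time.time() < deadline:
        stat = osd_stat()
        if stat["num_osds"] > 0 and stat["num_up_osds"] == stat["num_osds"]:
            return stat
        time.sleep(interval)
    raise TimeoutError("OSDs did not come up within %ds" % timeout)

At this point in the log such a loop would keep going: 8 OSDs are in the map but only 2 are up, so the poll repeats while the remaining daemons (osd.2 through osd.7, still being deployed below) register their CRUSH device class and location and report boot.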
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2587999135' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='osd.2 [v2:192.168.123.101:6808/2885552490,v1:192.168.123.101:6809/2885552490]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch
2026-04-13T17:51:13.020 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:13 vm00 ceph-mon[51174]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: purged_snaps scrub starts
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: purged_snaps scrub ok
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: purged_snaps scrub starts
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: purged_snaps scrub ok
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304]' entity='osd.1'
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: osd.0 [v2:192.168.123.101:6800/25951616,v1:192.168.123.101:6801/25951616] boot
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: osd.1 [v2:192.168.123.100:6802/933335304,v1:192.168.123.100:6803/933335304] boot
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: osdmap e16: 8 total, 2 up, 8 in
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2587999135' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='osd.2 [v2:192.168.123.101:6808/2885552490,v1:192.168.123.101:6809/2885552490]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]} : dispatch
2026-04-13T17:51:13.083 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:13 vm01 ceph-mon[56805]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]} : dispatch
2026-04-13T17:51:13.543 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json
2026-04-13T17:51:13.784 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: Deploying daemon osd.4 on vm01
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: Deploying daemon osd.5 on vm00
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: osdmap e17: 8 total, 2 up, 8 in
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
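The `osd crush set-device-class` and `osd crush create-or-move` dispatches above are the mon-side view of each new OSD registering its device class and CRUSH location as it boots. For reference, the same operations can be issued by hand from any node with an admin keyring; a minimal sketch (the IDs, class, weight, and location simply mirror the entries above):

    # Assign a device class to an OSD (idempotent; OSDs do this themselves at boot).
    ceph osd crush set-device-class hdd 2
    # Place an OSD under its host bucket with the weight derived from its device size (in TiB).
    ceph osd crush create-or-move 3 0.0195 host=vm00 root=default
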
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='osd.2 [v2:192.168.123.101:6808/2885552490,v1:192.168.123.101:6809/2885552490]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:14.024 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:14 vm00 ceph-mon[51174]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:14.182 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:51:14.232 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":17,"num_osds":8,"num_up_osds":2,"osd_up_since":1776102672,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0}
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: Deploying daemon osd.4 on vm01
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: Deploying daemon osd.5 on vm00
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: osdmap e17: 8 total, 2 up, 8 in
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='osd.2 [v2:192.168.123.101:6808/2885552490,v1:192.168.123.101:6809/2885552490]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:14.391 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:14 vm01 ceph-mon[56805]: from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/348171078' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: pgmap v25: 0 pgs: ; 0 B data, 289 MiB used, 40 GiB / 40 GiB avail
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: osdmap e18: 8 total, 2 up, 8 in
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:15.173 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:15 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:15.233 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/348171078' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: pgmap v25: 0 pgs: ; 0 B data, 289 MiB used, 40 GiB / 40 GiB avail
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: osdmap e18: 8 total, 2 up, 8 in
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-13T17:51:15.251 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:15 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-13T17:51:15.379 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:15.764 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:51:15.834 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":19,"num_osds":8,"num_up_osds":4,"osd_up_since":1776102675,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0}
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: purged_snaps scrub starts
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: purged_snaps scrub ok
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: purged_snaps scrub starts
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: purged_snaps scrub ok
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: OSD bench result of 51197.210066 IOPS is not within the threshold limit range of 50.000000 IOPS and 49000.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd].
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: Deploying daemon osd.6 on vm01
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: osd.2 [v2:192.168.123.101:6808/2885552490,v1:192.168.123.101:6809/2885552490] boot
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741] boot
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: osdmap e19: 8 total, 4 up, 8 in
2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch
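The `OSD bench` warning above is mclock's boot-time self-benchmark rejecting its own result: the measured ~51197 IOPS falls outside the accepted range of 50 to 49000 IOPS quoted in the message, so the IOPS capacity stays at the 315 default. As the warning itself recommends, the capacity can be pinned after benchmarking the device externally; a minimal sketch (315 here is only the default value the warning quotes, not a measured figure):

    # Pin the mclock IOPS capacity for one OSD after measuring it with an external tool (e.g. fio).
    ceph config set osd.3 osd_mclock_max_capacity_iops_hdd 315
    # Or set it for all OSDs at once.
    ceph config set osd osd_mclock_max_capacity_iops_hdd 315
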
metadata", "id": 2} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='osd.4 [v2:192.168.123.101:6816/2861109068,v1:192.168.123.101:6817/2861109068]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: Deploying daemon osd.7 on vm00 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/130842982' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:16.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:16 vm00 ceph-mon[51174]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: purged_snaps scrub starts 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: purged_snaps scrub ok 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: purged_snaps scrub starts 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: purged_snaps scrub ok 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: OSD bench result of 51197.210066 IOPS is not within the threshold limit range of 50.000000 IOPS and 49000.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: Deploying daemon osd.6 on vm01 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: osd.2 [v2:192.168.123.101:6808/2885552490,v1:192.168.123.101:6809/2885552490] boot 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: osd.3 [v2:192.168.123.100:6810/3342867741,v1:192.168.123.100:6811/3342867741] boot 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: osdmap e19: 8 total, 4 up, 8 in 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 
13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='osd.4 [v2:192.168.123.101:6816/2861109068,v1:192.168.123.101:6817/2861109068]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: Deploying daemon osd.7 on vm00 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/130842982' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:16.357 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:16 vm01 ceph-mon[56805]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]} : dispatch 2026-04-13T17:51:16.835 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:16.980 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:17.334 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:17.410 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":20,"num_osds":8,"num_up_osds":4,"osd_up_since":1776102675,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: pgmap v28: 0 pgs: ; 0 B data, 315 MiB used, 60 GiB / 60 GiB avail 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: osdmap e20: 8 total, 4 up, 8 in 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='osd.4 
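The repeated `cephadm ... shell ... -- ceph osd stat -f json` invocations are teuthology polling the cluster until every OSD reports up; the `num_up_osds` field in the JSON snapshots climbs from 2 to 4 to 6 as the deployed daemons boot. The same wait loop can be reproduced in plain shell; a minimal sketch, assuming `jq` is available for JSON extraction:

    # Poll `ceph osd stat` until every OSD is up, mirroring teuthology's wait loop.
    while true; do
        stat=$(ceph osd stat -f json)
        up=$(echo "$stat" | jq .num_up_osds)
        total=$(echo "$stat" | jq .num_osds)
        [ "$up" -eq "$total" ] && break
        echo "waiting: $up/$total OSDs up"
        sleep 2
    done
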
2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='osd.4 [v2:192.168.123.101:6816/2861109068,v1:192.168.123.101:6817/2861109068]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch
2026-04-13T17:51:17.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:17 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/533703287' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: pgmap v28: 0 pgs: ; 0 B data, 315 MiB used, 60 GiB / 60 GiB avail
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: osdmap e20: 8 total, 4 up, 8 in
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='osd.4 [v2:192.168.123.101:6816/2861109068,v1:192.168.123.101:6817/2861109068]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch
2026-04-13T17:51:17.865 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:17 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/533703287' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:18.412 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: osdmap e21: 8 total, 4 up, 8 in
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5'
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='osd.6 [v2:192.168.123.101:6824/4283425216,v1:192.168.123.101:6825/4283425216]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: osd.4 [v2:192.168.123.101:6816/2861109068,v1:192.168.123.101:6817/2861109068] boot
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992] boot
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: osdmap e22: 8 total, 6 up, 8 in
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:18.540 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:18 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:18.560 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: osdmap e21: 8 total, 4 up, 8 in
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992]' entity='osd.5'
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='osd.6 [v2:192.168.123.101:6824/4283425216,v1:192.168.123.101:6825/4283425216]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: osd.4 [v2:192.168.123.101:6816/2861109068,v1:192.168.123.101:6817/2861109068] boot
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: osd.5 [v2:192.168.123.100:6818/3592485992,v1:192.168.123.100:6819/3592485992] boot
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: osdmap e22: 8 total, 6 up, 8 in
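With enough OSDs up, the mgr bootstraps its own `.mgr` pool (one PG, capped at 32) and tags it with the `mgr` application, as the `osd pool create` and `osd pool application enable` dispatches above show. For reference, the CLI equivalent of those two mon commands (pool name and application mirror the log; the confirmation flag corresponds to the `yes_i_really_mean_it` field in the dispatched JSON, required because dot-prefixed pool names are reserved):

    # Create a 1-PG pool and tag it for the mgr module, as the mgr does internally.
    ceph osd pool create .mgr 1 --yes-i-really-mean-it
    ceph osd pool application enable .mgr mgr --yes-i-really-mean-it
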
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:18.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:18 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:18.933 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:51:18.983 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":22,"num_osds":8,"num_up_osds":6,"osd_up_since":1776102678,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0}
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: purged_snaps scrub starts
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: purged_snaps scrub ok
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: purged_snaps scrub starts
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: purged_snaps scrub ok
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: pgmap v31: 1 pgs: 1 unknown; 0 B data, 906 MiB used, 79 GiB / 80 GiB avail
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='osd.7 [v2:192.168.123.100:6826/3218096137,v1:192.168.123.100:6827/3218096137]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1206572046' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: osdmap e23: 8 total, 6 up, 8 in
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:19.767 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:19 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:19.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: purged_snaps scrub starts
2026-04-13T17:51:19.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: purged_snaps scrub ok
2026-04-13T17:51:19.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: purged_snaps scrub starts
2026-04-13T17:51:19.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: purged_snaps scrub ok
2026-04-13T17:51:19.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: pgmap v31: 1 pgs: 1 unknown; 0 B data, 906 MiB used, 79 GiB / 80 GiB avail
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='osd.7 [v2:192.168.123.100:6826/3218096137,v1:192.168.123.100:6827/3218096137]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]} : dispatch
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1206572046' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: osdmap e23: 8 total, 6 up, 8 in
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-13T17:51:19.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:19 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-13T17:51:19.984 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json
2026-04-13T17:51:20.155 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:20.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 sudo[77374]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
2026-04-13T17:51:20.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 sudo[77374]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
2026-04-13T17:51:20.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 sudo[77374]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
2026-04-13T17:51:20.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 sudo[77374]: pam_unix(sudo:session): session closed for user root
2026-04-13T17:51:20.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 sudo[67450]: ceph : PWD=/ ; USER=root ; COMMAND=/usr/sbin/smartctl -x --json=o /dev/vda
2026-04-13T17:51:20.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 sudo[67450]: pam_systemd(sudo:session): Failed to connect to system bus: No such file or directory
2026-04-13T17:51:20.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 sudo[67450]: pam_unix(sudo:session): session opened for user root(uid=0) by (uid=167)
2026-04-13T17:51:20.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 sudo[67450]: pam_unix(sudo:session): session closed for user root
2026-04-13T17:51:20.551 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:51:20.629 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":24,"num_osds":8,"num_up_osds":6,"osd_up_since":1776102678,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0}
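The `sudo ... smartctl -x --json=o /dev/vda` entries are the mgr's device-health scraping running on each host as the `ceph` user (uid 167, hence the sudo session records; the `pam_systemd` bus error inside the container is harmless). The same probe can be run by hand; a minimal sketch, assuming `jq` is available (`/dev/vda` is simply the device the log happens to scrape, and virtio disks may report little SMART data):

    # Same probe the mgr issues: -x dumps all SMART info, --json=o includes the raw output.
    sudo smartctl -x --json=o /dev/vda | jq '{model: .model_name, healthy: .smart_status.passed}'
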
"root=default"]} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='osd.7 [v2:192.168.123.100:6826/3218096137,v1:192.168.123.100:6827/3218096137]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch 
2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: osdmap e24: 8 total, 6 up, 8 in 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:20.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:20 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:20.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='osd.6 [v2:192.168.123.101:6824/4283425216,v1:192.168.123.101:6825/4283425216]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='osd.7 [v2:192.168.123.100:6826/3218096137,v1:192.168.123.100:6827/3218096137]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm01", "root=default"]} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm00", "root=default"]} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 
192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm00"} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "mon metadata", "id": "vm01"} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:20.891 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm01", "root=default"]}]': finished 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm00", "root=default"]}]': finished 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: osdmap e24: 8 total, 6 up, 8 in 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:20.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:20 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:21.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: purged_snaps scrub starts 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: purged_snaps scrub ok 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 1.2 GiB used, 119 GiB / 120 GiB avail 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/43869254' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='osd.6 ' entity='osd.6' 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: mgrmap e19: vm00.vrvkmc(active, since 46s), standbys: vm01.qjjyaa 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:21.630 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd stat -f json 2026-04-13T17:51:21.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: purged_snaps scrub starts 2026-04-13T17:51:21.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: purged_snaps scrub ok 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: pgmap v34: 1 pgs: 1 unknown; 0 B data, 1.2 GiB used, 119 GiB / 120 GiB avail 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/43869254' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='osd.6 ' entity='osd.6' 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: mgrmap e19: vm00.vrvkmc(active, since 46s), standbys: vm01.qjjyaa 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:21.641 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:21.763 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:22.123 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:22.183 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":25,"num_osds":8,"num_up_osds":8,"osd_up_since":1776102681,"num_in_osds":8,"osd_in_since":1776102664,"num_remapped_pgs":0} 2026-04-13T17:51:22.183 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd dump --format=json 2026-04-13T17:51:22.330 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: purged_snaps scrub starts 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: purged_snaps scrub ok 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: osd.7 [v2:192.168.123.100:6826/3218096137,v1:192.168.123.100:6827/3218096137] boot 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: osd.6 [v2:192.168.123.101:6824/4283425216,v1:192.168.123.101:6825/4283425216] boot 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: osdmap e25: 8 total, 8 up, 8 in 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 
192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/619933138' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:22 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch 2026-04-13T17:51:22.692 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:22.692 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":26,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","created":"2026-04-13T17:49:06.632406+0000","modified":"2026-04-13T17:51:22.488889+0000","last_up_change":"2026-04-13T17:51:21.479929+0000","last_in_change":"2026-04-13T17:51:04.368986+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-13T17:51:16.549032+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"3331b8e8-674c-47c3-bfcd-df00c60d9807","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6801","nonce":25951616}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6803","nonce":25951616}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6807","nonce":25951616}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6805","nonce":25951616}]},"public_addr":"192.168.123.101:6801/25951616","cluster_addr":"192.168.123.101:6803/25951616","heartbeat_back_addr":"192.168.123.101:6807/25951616","heartbeat_front_addr":"192.168.123.101:6805/25951616","state":["exists","up"]},{"osd":1,"uuid":"1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6803","nonce":933335304}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6805","nonce":933335304}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6809","nonce":933335304}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6807","nonce":933335304}]},"public_addr":"192.168.123.100:6803/933335304","cluster_addr":"192.168.123.100:6805/933335304","heartbeat_back_addr":"192.168.123.100:6809/933335304","heartbeat_front_addr":"192.168.123.100:6807/933335304","state":["exists","up"]},{"osd":2,"uuid":"341364f2-8742-4fac-bbbd-c5a789f2cf2c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6809","nonce":2885552490}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6811","nonce":2885552490}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6815","nonce":2885552490}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6813","nonce":2885552490}]},"public_addr":"192.168.123.101:6809/2885552490","cluster_addr":"192.168.123.101:6811/2885552490","heartbeat_back_addr":"192.168.123.101:6815/2885552490","heartbeat_front_addr":"192.168.123.101:6813/2885552490","state":["exists","up"]},{"osd":3,"uuid":"f5faee95-1805-4c8c-bfbc-2faa1b776d58","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean
_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6811","nonce":3342867741}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6813","nonce":3342867741}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6817","nonce":3342867741}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6815","nonce":3342867741}]},"public_addr":"192.168.123.100:6811/3342867741","cluster_addr":"192.168.123.100:6813/3342867741","heartbeat_back_addr":"192.168.123.100:6817/3342867741","heartbeat_front_addr":"192.168.123.100:6815/3342867741","state":["exists","up"]},{"osd":4,"uuid":"8540763b-ebe2-4b6e-aa18-9e97c56c3668","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6817","nonce":2861109068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6819","nonce":2861109068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6823","nonce":2861109068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6821","nonce":2861109068}]},"public_addr":"192.168.123.101:6817/2861109068","cluster_addr":"192.168.123.101:6819/2861109068","heartbeat_back_addr":"192.168.123.101:6823/2861109068","heartbeat_front_addr":"192.168.123.101:6821/2861109068","state":["exists","up"]},{"osd":5,"uuid":"05752c92-abcf-43e4-8022-6bfac80c889f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6819","nonce":3592485992}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6821","nonce":3592485992}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6825","nonce":3592485992}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6823","nonce":3592485992}]},"public_addr":"192.168.123.100:6819/3592485992","cluster_addr":"192.168.123.100:6821/3592485992","heartbeat_back_addr":"192.168.123.100:6825/3592485992","heartbeat_front_addr":"192.168.123.100:6823/3592485992","state":["exists","up"]},{"osd":6,"uuid":"72f6cb1d-edb6-4bf9-be69-8175437241a2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":25,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6825","nonce":4283425216}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6827","nonce":4283425216
}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6831","nonce":4283425216}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6829","nonce":4283425216}]},"public_addr":"192.168.123.101:6825/4283425216","cluster_addr":"192.168.123.101:6827/4283425216","heartbeat_back_addr":"192.168.123.101:6831/4283425216","heartbeat_front_addr":"192.168.123.101:6829/4283425216","state":["exists","up"]},{"osd":7,"uuid":"d4157b61-d78c-4dc1-8f66-e17bf55003cd","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6827","nonce":3218096137}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6829","nonce":3218096137}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6833","nonce":3218096137}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6831","nonce":3218096137}]},"public_addr":"192.168.123.100:6827/3218096137","cluster_addr":"192.168.123.100:6829/3218096137","heartbeat_back_addr":"192.168.123.100:6833/3218096137","heartbeat_front_addr":"192.168.123.100:6831/3218096137","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:10.624356+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:10.673245+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:13.519702+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:13.697598+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:16.492951+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:16.845156+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"0.000000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:19.786899+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[{"pgid":"1.0","osds":[0,5,2]}],"primary_temp":[],"blocklist":{"192.168.123.100:0/1850419877":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/680083076":"2026-04-14T17:50:34.270195+0000","192.168.123.100:6800/2938697341":"2026-04-14T17:49:33.038844+0000","192.168.123.100:6801/2938697341":"202
6-04-14T17:49:33.038844+0000","192.168.123.100:6801/2142258289":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/2718097030":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/2605477811":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/952737910":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/1857452536":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/529456344":"2026-04-14T17:49:56.160034+0000","192.168.123.100:0/2275315440":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6801/772254196":"2026-04-14T17:49:56.160034+0000","192.168.123.100:0/1553892770":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6800/772254196":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6800/2142258289":"2026-04-14T17:50:34.270195+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-13T17:51:22.702 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: purged_snaps scrub starts 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: purged_snaps scrub ok 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: osd.7 [v2:192.168.123.100:6826/3218096137,v1:192.168.123.100:6827/3218096137] boot 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: osd.6 [v2:192.168.123.101:6824/4283425216,v1:192.168.123.101:6825/4283425216] boot 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: osdmap e25: 8 total, 8 up, 8 in 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/619933138' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:22.703 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:22 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm01", "name": "osd_memory_target"} : dispatch 2026-04-13T17:51:22.741 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-04-13T17:51:16.549032+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '24', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'nonprimary_shards': '{}', 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-04-13T17:51:22.741 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd pool get .mgr pg_num 2026-04-13T17:51:22.876 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:23.243 INFO:teuthology.orchestra.run.vm00.stdout:pg_num: 1 2026-04-13T17:51:23.294 INFO:tasks.cephadm:Setting up client nodes... 
2026-04-13T17:51:23.294 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-04-13T17:51:23.418 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:23.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: pgmap v37: 1 pgs: 1 active+clean; 577 KiB data, 820 MiB used, 119 GiB / 120 GiB avail 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: Detected new or changed devices on vm01 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: osdmap e26: 8 total, 8 up, 8 in 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1882604674' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: Detected new or changed devices on vm00 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"} : dispatch 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-13T17:51:23.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:23 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/491523899' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-13T17:51:23.827 INFO:teuthology.orchestra.run.vm00.stdout:[client.0] 2026-04-13T17:51:23.828 INFO:teuthology.orchestra.run.vm00.stdout: key = AQAbLd1posohMRAAvpq1Ok/c2O1l02JnWH0RrQ== 2026-04-13T17:51:23.879 DEBUG:teuthology.orchestra.run.vm00:> set -ex 2026-04-13T17:51:23.879 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-04-13T17:51:23.879 DEBUG:teuthology.orchestra.run.vm00:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: pgmap v37: 1 pgs: 1 active+clean; 577 KiB data, 820 MiB used, 119 GiB / 120 GiB avail 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: Detected new or changed devices on vm01 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: osdmap e26: 8 total, 8 up, 8 in 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1882604674' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: Detected new or changed devices on vm00 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config rm", "who": "osd/host:vm00", "name": "osd_memory_target"} : dispatch 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-13T17:51:23.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:23 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/491523899' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-13T17:51:23.915 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-04-13T17:51:24.047 INFO:teuthology.orchestra.run.vm01.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm01/config 2026-04-13T17:51:24.473 INFO:teuthology.orchestra.run.vm01.stdout:[client.1] 2026-04-13T17:51:24.473 INFO:teuthology.orchestra.run.vm01.stdout: key = AQAcLd1pCvvJGxAATloTiNGuP6DqN5PEVOjpUA== 2026-04-13T17:51:24.524 DEBUG:teuthology.orchestra.run.vm01:> set -ex 2026-04-13T17:51:24.524 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-04-13T17:51:24.524 DEBUG:teuthology.orchestra.run.vm01:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-04-13T17:51:24.567 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph config log 1 --format=json 2026-04-13T17:51:24.693 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:24.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:24 vm00 ceph-mon[51174]: osdmap e27: 8 total, 8 up, 8 in 2026-04-13T17:51:24.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:24 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1240833667' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-13T17:51:24.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:24 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1240833667' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-13T17:51:24.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:24 vm00 ceph-mon[51174]: from='client.? 192.168.123.101:0/4184638505' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-13T17:51:24.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:24 vm00 ceph-mon[51174]: from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-13T17:51:24.759 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:24 vm00 ceph-mon[51174]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-13T17:51:24.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:24 vm01 ceph-mon[56805]: osdmap e27: 8 total, 8 up, 8 in 2026-04-13T17:51:24.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:24 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/1240833667' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-13T17:51:24.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:24 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1240833667' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-13T17:51:24.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:24 vm01 ceph-mon[56805]: from='client.? 192.168.123.101:0/4184638505' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-13T17:51:24.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:24 vm01 ceph-mon[56805]: from='client.? ' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-13T17:51:24.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:24 vm01 ceph-mon[56805]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-13T17:51:25.056 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:25.105 INFO:teuthology.orchestra.run.vm00.stdout:[{"version":16,"timestamp":"2026-04-13T17:51:20.994994+0000","name":"","changes":[{"name":"osd.6/osd_mclock_max_capacity_iops_hdd","new_value":"36772.141876"}]}] 2026-04-13T17:51:25.105 INFO:tasks.ceph_manager:config epoch is 16 2026-04-13T17:51:25.105 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 
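The client-node setup just logged is mechanical: for each client role, mint a key with "ceph auth get-or-create client.N" carrying allow-all caps on mon/osd/mds/mgr, stream the returned keyring text into /etc/ceph/ceph.client.N.keyring through sudo dd, and chmod it 0644. A rough single-client equivalent of those steps, reusing the same cephadm constants as the sketch above (setup_client is an illustrative name, not the harness's helper):

import subprocess

CEPHADM = "/home/ubuntu/cephtest/cephadm"
IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4"
FSID = "00063a34-3761-11f1-944c-abe11cccf0ff"

def setup_client(client_id,
                 conf="/etc/ceph/ceph.conf",
                 keyring="/etc/ceph/ceph.client.admin.keyring"):
    """Mirror the per-client steps logged above: mint a key, install the keyring."""
    entity = f"client.{client_id}"
    # `ceph auth get-or-create` prints a full keyring section, e.g.
    # [client.0]\n\tkey = AQ...== , which is what gets written to disk.
    keyring_text = subprocess.check_output(
        ["sudo", CEPHADM, "--image", IMAGE, "shell",
         "-c", conf, "-k", keyring, "--fsid", FSID, "--",
         "ceph", "auth", "get-or-create", entity,
         "mon", "allow *", "osd", "allow *",
         "mds", "allow *", "mgr", "allow *"],
        text=True,
    )
    path = f"/etc/ceph/ceph.{entity}.keyring"
    # The harness pipes the keyring through `sudo dd of=...`; do the same,
    # then relax permissions so non-root test processes can read it.
    subprocess.run(["sudo", "dd", f"of={path}"],
                   input=keyring_text, text=True, check=True)
    subprocess.run(["sudo", "chmod", "0644", path], check=True)

Run against this cluster, setup_client(0) on vm00 and setup_client(1) on vm01 would reproduce the [client.0] and [client.1] keyrings shown above, after which the harness turns to waiting for the mgr and clean PGs.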
2026-04-13T17:51:25.105 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-04-13T17:51:25.105 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mgr dump --format=json 2026-04-13T17:51:25.233 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:25.623 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:25.675 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":19,"flags":0,"active_gid":14227,"active_name":"vm00.vrvkmc","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6800","nonce":1020200204},{"type":"v1","addr":"192.168.123.100:6801","nonce":1020200204}]},"active_addr":"192.168.123.100:6801/1020200204","active_change":"2026-04-13T17:50:34.270294+0000","active_mgr_features":4541880224203014143,"available":true,"standbys":[{"gid":14262,"name":"vm01.qjjyaa","mgr_features":4541880224203014143,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope 
sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman 
only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate 
as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman 
only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. For development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.100:8443/","prometheus":"http://192.168.123.100:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"tentacle":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":1292084223}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":2669617780}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":2233310330}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.100:0","nonce":2186227230}]}]} 2026-04-13T17:51:25.676 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-04-13T17:51:25.676 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-04-13T17:51:25.676 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd dump --format=json 2026-04-13T17:51:25.809 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:25.873 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:25 vm00 ceph-mon[51174]: pgmap v40: 1 pgs: 1 unknown; 0 B data, 615 MiB used, 159 GiB / 160 GiB avail 2026-04-13T17:51:25.873 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:25 vm00 ceph-mon[51174]: osdmap e28: 8 total, 8 up, 8 in 2026-04-13T17:51:25.873 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:25 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/107342601' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-13T17:51:25.873 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:25 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1643112655' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-13T17:51:26.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:25 vm01 ceph-mon[56805]: pgmap v40: 1 pgs: 1 unknown; 0 B data, 615 MiB used, 159 GiB / 160 GiB avail 2026-04-13T17:51:26.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:25 vm01 ceph-mon[56805]: osdmap e28: 8 total, 8 up, 8 in 2026-04-13T17:51:26.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:25 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/107342601' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-13T17:51:26.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:25 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1643112655' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-13T17:51:26.168 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:26.168 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":29,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","created":"2026-04-13T17:49:06.632406+0000","modified":"2026-04-13T17:51:25.839058+0000","last_up_change":"2026-04-13T17:51:21.479929+0000","last_in_change":"2026-04-13T17:51:04.368986+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-13T17:51:16.549032+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"3331b8e8-674c-47c3-bfcd-df00c60d9807","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6801","nonce":25951616}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6803","nonce":25951616}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6807","nonce":25951616}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6805","nonce":25951616}]},"public_addr":"192.168.123.101:6801/25951616","cluster_addr":"192.168.123.101:6803/25951616","heartbeat_back_addr":"192.168.123.101:6807/25951616","heartbeat_front_addr":"192.168.123.101:6805/25951616","state":["exists","up"]},{"osd":1,"uuid":"1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6803","nonce":933335304}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6805","nonce":933335304}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6809","nonce":933335304}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6807","nonce":933335304}]},"public_addr":"192.168.123.100:6803/933335304","cluster_addr":"192.168.123.100:6805/933335304","heartbeat_back_addr":"192.168.123.100:6809/933335304","heartbeat_front_addr":"192.168.123.100:6807/933335304","state":["exists","up"]},{"osd":2,"uuid":"341364f2-8742-4fac-bbbd-c5a789f2cf2c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6809","nonce":2885552490}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6811","nonce":2885552490}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6815","nonce":2885552490}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6813","nonce":2885552490}]},"public_addr":"192.168.123.101:6809/2885552490","cluster_addr":"192.168.123.101:6811/2885552490","heartbeat_back_addr":"192.168.123.101:6815/2885552490","heartbeat_front_addr":"192.168.123.101:6813/2885552490","state":["exists","up"]},{"osd":3,"uuid":"f5faee95-1805-4c8c-bfbc-2faa1b776d58","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clea
n_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6811","nonce":3342867741}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6813","nonce":3342867741}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6817","nonce":3342867741}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6815","nonce":3342867741}]},"public_addr":"192.168.123.100:6811/3342867741","cluster_addr":"192.168.123.100:6813/3342867741","heartbeat_back_addr":"192.168.123.100:6817/3342867741","heartbeat_front_addr":"192.168.123.100:6815/3342867741","state":["exists","up"]},{"osd":4,"uuid":"8540763b-ebe2-4b6e-aa18-9e97c56c3668","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6817","nonce":2861109068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6819","nonce":2861109068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6823","nonce":2861109068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6821","nonce":2861109068}]},"public_addr":"192.168.123.101:6817/2861109068","cluster_addr":"192.168.123.101:6819/2861109068","heartbeat_back_addr":"192.168.123.101:6823/2861109068","heartbeat_front_addr":"192.168.123.101:6821/2861109068","state":["exists","up"]},{"osd":5,"uuid":"05752c92-abcf-43e4-8022-6bfac80c889f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6819","nonce":3592485992}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6821","nonce":3592485992}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6825","nonce":3592485992}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6823","nonce":3592485992}]},"public_addr":"192.168.123.100:6819/3592485992","cluster_addr":"192.168.123.100:6821/3592485992","heartbeat_back_addr":"192.168.123.100:6825/3592485992","heartbeat_front_addr":"192.168.123.100:6823/3592485992","state":["exists","up"]},{"osd":6,"uuid":"72f6cb1d-edb6-4bf9-be69-8175437241a2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6825","nonce":4283425216}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6827","nonce":428342521
6}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6831","nonce":4283425216}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6829","nonce":4283425216}]},"public_addr":"192.168.123.101:6825/4283425216","cluster_addr":"192.168.123.101:6827/4283425216","heartbeat_back_addr":"192.168.123.101:6831/4283425216","heartbeat_front_addr":"192.168.123.101:6829/4283425216","state":["exists","up"]},{"osd":7,"uuid":"d4157b61-d78c-4dc1-8f66-e17bf55003cd","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6827","nonce":3218096137}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6829","nonce":3218096137}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6833","nonce":3218096137}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6831","nonce":3218096137}]},"public_addr":"192.168.123.100:6827/3218096137","cluster_addr":"192.168.123.100:6829/3218096137","heartbeat_back_addr":"192.168.123.100:6833/3218096137","heartbeat_front_addr":"192.168.123.100:6831/3218096137","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:10.624356+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:10.673245+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:13.519702+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:13.697598+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:16.492951+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:16.845156+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:19.426280+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:19.786899+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:0/1850419877":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/680083076":"2026-04-14T17:50:34.270195+0000","192.168.123.100:6800/2938697341":"2026-04-14T17:49:33.038844+0000","192.168.123.100:6801/2938697341":"2026-04-
14T17:49:33.038844+0000","192.168.123.100:6801/2142258289":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/2718097030":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/2605477811":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/952737910":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/1857452536":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/529456344":"2026-04-14T17:49:56.160034+0000","192.168.123.100:0/2275315440":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6801/772254196":"2026-04-14T17:49:56.160034+0000","192.168.123.100:0/1553892770":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6800/772254196":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6800/2142258289":"2026-04-14T17:50:34.270195+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-13T17:51:26.215 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-04-13T17:51:26.215 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd dump --format=json 2026-04-13T17:51:26.351 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:26.702 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:26.702 INFO:teuthology.orchestra.run.vm00.stdout:{"epoch":29,"fsid":"00063a34-3761-11f1-944c-abe11cccf0ff","created":"2026-04-13T17:49:06.632406+0000","modified":"2026-04-13T17:51:25.839058+0000","last_up_change":"2026-04-13T17:51:21.479929+0000","last_in_change":"2026-04-13T17:51:04.368986+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":10,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-13T17:51:16.549032+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"
cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"3331b8e8-674c-47c3-bfcd-df00c60d9807","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":27,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6800","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6801","nonce":25951616}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6802","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6803","nonce":25951616}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6806","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6807","nonce":25951616}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6804","nonce":25951616},{"type":"v1","addr":"192.168.123.101:6805","nonce":25951616}]},"public_addr":"192.168.123.101:6801/25951616","cluster_addr":"192.168.123.101:6803/25951616","heartbeat_back_addr":"192.168.123.101:6807/25951616","heartbeat_front_addr":"192.168.123.101:6805/25951616","state":["exists","up"]},{"osd":1,"uuid":"1f69d7b9-313c-49a1-a641-2e9a9aa0a7f2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6802","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6803","nonce":933335304}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6804","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6805","nonce":933335304}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6808","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6809","nonce":933335304}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6806","nonce":933335304},{"type":"v1","addr":"192.168.123.100:6807","nonce":933335304}]},"public_addr":"192.168.123.100:6803/933335304","cluster_addr":"192.168.123.100:6805/933335304","heartbeat_back_addr":"192.168.123.100:6809/933335304","heartbeat_front_addr":"192.168.123.100:6807/933335304","state":["exists","up"]},{"osd":2,"uuid":"341364f2-8742-4fac-bbbd-c5a789f2cf2c","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6808","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6809","nonce":2885552490}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6810","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6811","nonce":2885552490}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6814","nonce":2885552490},{"type":"v1
","addr":"192.168.123.101:6815","nonce":2885552490}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6812","nonce":2885552490},{"type":"v1","addr":"192.168.123.101:6813","nonce":2885552490}]},"public_addr":"192.168.123.101:6809/2885552490","cluster_addr":"192.168.123.101:6811/2885552490","heartbeat_back_addr":"192.168.123.101:6815/2885552490","heartbeat_front_addr":"192.168.123.101:6813/2885552490","state":["exists","up"]},{"osd":3,"uuid":"f5faee95-1805-4c8c-bfbc-2faa1b776d58","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6810","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6811","nonce":3342867741}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6812","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6813","nonce":3342867741}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6816","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6817","nonce":3342867741}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6814","nonce":3342867741},{"type":"v1","addr":"192.168.123.100:6815","nonce":3342867741}]},"public_addr":"192.168.123.100:6811/3342867741","cluster_addr":"192.168.123.100:6813/3342867741","heartbeat_back_addr":"192.168.123.100:6817/3342867741","heartbeat_front_addr":"192.168.123.100:6815/3342867741","state":["exists","up"]},{"osd":4,"uuid":"8540763b-ebe2-4b6e-aa18-9e97c56c3668","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6816","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6817","nonce":2861109068}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6818","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6819","nonce":2861109068}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6822","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6823","nonce":2861109068}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6820","nonce":2861109068},{"type":"v1","addr":"192.168.123.101:6821","nonce":2861109068}]},"public_addr":"192.168.123.101:6817/2861109068","cluster_addr":"192.168.123.101:6819/2861109068","heartbeat_back_addr":"192.168.123.101:6823/2861109068","heartbeat_front_addr":"192.168.123.101:6821/2861109068","state":["exists","up"]},{"osd":5,"uuid":"05752c92-abcf-43e4-8022-6bfac80c889f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":22,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6818","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6819","nonce":3592485992}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6820","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6821","nonce":3592485992}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6824","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6825","nonce":3592485992}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6822","nonce":3592485992},{"type":"v1","addr":"192.168.123.100:6823","nonce":3592485992}]},"public_addr":"192.168.123.100:6819/3592485992","cluster_addr":"192.168.123.100:6821/3592485992","heartbeat_back_addr":"192.168.123.100:6825/359248
5992","heartbeat_front_addr":"192.168.123.100:6823/3592485992","state":["exists","up"]},{"osd":6,"uuid":"72f6cb1d-edb6-4bf9-be69-8175437241a2","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":28,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6824","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6825","nonce":4283425216}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6826","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6827","nonce":4283425216}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6830","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6831","nonce":4283425216}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.101:6828","nonce":4283425216},{"type":"v1","addr":"192.168.123.101:6829","nonce":4283425216}]},"public_addr":"192.168.123.101:6825/4283425216","cluster_addr":"192.168.123.101:6827/4283425216","heartbeat_back_addr":"192.168.123.101:6831/4283425216","heartbeat_front_addr":"192.168.123.101:6829/4283425216","state":["exists","up"]},{"osd":7,"uuid":"d4157b61-d78c-4dc1-8f66-e17bf55003cd","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":25,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6826","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6827","nonce":3218096137}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6828","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6829","nonce":3218096137}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6832","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6833","nonce":3218096137}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.100:6830","nonce":3218096137},{"type":"v1","addr":"192.168.123.100:6831","nonce":3218096137}]},"public_addr":"192.168.123.100:6827/3218096137","cluster_addr":"192.168.123.100:6829/3218096137","heartbeat_back_addr":"192.168.123.100:6833/3218096137","heartbeat_front_addr":"192.168.123.100:6831/3218096137","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:10.624356+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:10.673245+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:13.519702+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:13.697598+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:16.492951+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:16.845156+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"202
6-04-13T17:51:19.426280+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-13T17:51:19.786899+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.100:0/1850419877":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/680083076":"2026-04-14T17:50:34.270195+0000","192.168.123.100:6800/2938697341":"2026-04-14T17:49:33.038844+0000","192.168.123.100:6801/2938697341":"2026-04-14T17:49:33.038844+0000","192.168.123.100:6801/2142258289":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/2718097030":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/2605477811":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/952737910":"2026-04-14T17:50:34.270195+0000","192.168.123.100:0/1857452536":"2026-04-14T17:49:33.038844+0000","192.168.123.100:0/529456344":"2026-04-14T17:49:56.160034+0000","192.168.123.100:0/2275315440":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6801/772254196":"2026-04-14T17:49:56.160034+0000","192.168.123.100:0/1553892770":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6800/772254196":"2026-04-14T17:49:56.160034+0000","192.168.123.100:6800/2142258289":"2026-04-14T17:50:34.270195+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.0 flush_pg_stats 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.1 flush_pg_stats 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.2 flush_pg_stats 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.3 flush_pg_stats 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.4 flush_pg_stats 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.5 flush_pg_stats 2026-04-13T17:51:26.772 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell 
--fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.6 flush_pg_stats 2026-04-13T17:51:26.773 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph tell osd.7 flush_pg_stats 2026-04-13T17:51:27.009 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.023 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.043 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.059 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:26 vm00 ceph-mon[51174]: osdmap e29: 8 total, 8 up, 8 in 2026-04-13T17:51:27.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:26 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3897794835' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-13T17:51:27.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:26 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1448126339' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-13T17:51:27.106 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:26 vm01 ceph-mon[56805]: osdmap e29: 8 total, 8 up, 8 in 2026-04-13T17:51:27.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:26 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/3897794835' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-13T17:51:27.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:26 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/1448126339' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-13T17:51:27.147 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.167 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.224 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.597 INFO:teuthology.orchestra.run.vm00.stdout:81604378628 2026-04-13T17:51:27.597 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.3 2026-04-13T17:51:27.615 INFO:teuthology.orchestra.run.vm00.stdout:81604378628 2026-04-13T17:51:27.616 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.2 2026-04-13T17:51:27.722 INFO:teuthology.orchestra.run.vm00.stdout:94489280515 2026-04-13T17:51:27.722 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.5 2026-04-13T17:51:27.788 INFO:teuthology.orchestra.run.vm00.stdout:107374182403 2026-04-13T17:51:27.788 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.7 2026-04-13T17:51:27.851 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.860 INFO:teuthology.orchestra.run.vm00.stdout:94489280515 2026-04-13T17:51:27.861 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.4 2026-04-13T17:51:27.868 INFO:teuthology.orchestra.run.vm00.stdout:107374182403 2026-04-13T17:51:27.868 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.6 2026-04-13T17:51:27.878 INFO:teuthology.orchestra.run.vm00.stdout:68719476741 2026-04-13T17:51:27.878 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.1 2026-04-13T17:51:27.896 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:27.898 INFO:teuthology.orchestra.run.vm00.stdout:68719476741 2026-04-13T17:51:27.898 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 
00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.0 2026-04-13T17:51:27.997 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:28.102 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:27 vm00 ceph-mon[51174]: pgmap v43: 1 pgs: 1 unknown; 0 B data, 614 MiB used, 159 GiB / 160 GiB avail 2026-04-13T17:51:28.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:27 vm01 ceph-mon[56805]: pgmap v43: 1 pgs: 1 unknown; 0 B data, 614 MiB used, 159 GiB / 160 GiB avail 2026-04-13T17:51:28.176 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:28.180 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:28.196 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:28.323 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:28.348 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:28.517 INFO:teuthology.orchestra.run.vm00.stdout:81604378626 2026-04-13T17:51:28.709 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378626 for osd.3 2026-04-13T17:51:28.741 INFO:teuthology.orchestra.run.vm00.stdout:81604378627 2026-04-13T17:51:28.769 INFO:teuthology.orchestra.run.vm00.stdout:107374182401 2026-04-13T17:51:28.793 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378627 for osd.2 2026-04-13T17:51:28.848 INFO:teuthology.orchestra.run.vm00.stdout:94489280514 2026-04-13T17:51:28.863 INFO:teuthology.orchestra.run.vm00.stdout:68719476739 2026-04-13T17:51:28.895 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:28 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/397986089' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-13T17:51:28.895 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:28 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/305158147' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-13T17:51:28.895 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:28 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/402383900' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-13T17:51:28.895 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:28 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/3123145337' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-13T17:51:28.895 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182403 got 107374182401 for osd.7 2026-04-13T17:51:28.907 INFO:teuthology.orchestra.run.vm00.stdout:107374182403 2026-04-13T17:51:28.991 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280515 got 94489280514 for osd.5 2026-04-13T17:51:28.992 INFO:teuthology.orchestra.run.vm00.stdout:94489280515 2026-04-13T17:51:29.021 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476739 for osd.1 2026-04-13T17:51:29.023 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182403 got 107374182403 for osd.6 2026-04-13T17:51:29.023 INFO:teuthology.orchestra.run.vm00.stdout:68719476741 2026-04-13T17:51:29.023 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:29.060 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280515 got 94489280515 for osd.4 2026-04-13T17:51:29.060 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:29.075 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476741 for osd.0 2026-04-13T17:51:29.075 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:29.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:28 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/397986089' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-13T17:51:29.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:28 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/305158147' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-13T17:51:29.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:28 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/402383900' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-13T17:51:29.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:28 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/3123145337' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-13T17:51:29.710 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.3 2026-04-13T17:51:29.793 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.2 2026-04-13T17:51:29.844 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:29.896 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.7 2026-04-13T17:51:29.938 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:29.983 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:29 vm00 ceph-mon[51174]: pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:29.983 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:29 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/3908770594' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-13T17:51:29.983 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:29 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1520928530' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-13T17:51:29.983 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:29 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1761031153' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-13T17:51:29.983 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:29 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/20649380' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-13T17:51:29.991 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.5 2026-04-13T17:51:30.022 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph osd last-stat-seq osd.1 2026-04-13T17:51:30.060 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:30.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:29 vm01 ceph-mon[56805]: pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:30.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:29 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/3908770594' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-13T17:51:30.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:29 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1520928530' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-13T17:51:30.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:29 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1761031153' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-13T17:51:30.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:29 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/20649380' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-13T17:51:30.170 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:30.256 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:30.306 INFO:teuthology.orchestra.run.vm00.stdout:81604378628 2026-04-13T17:51:30.393 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378628 for osd.3 2026-04-13T17:51:30.393 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:30.402 INFO:teuthology.orchestra.run.vm00.stdout:81604378628 2026-04-13T17:51:30.464 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378628 got 81604378628 for osd.2 2026-04-13T17:51:30.464 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:30.519 INFO:teuthology.orchestra.run.vm00.stdout:107374182403 2026-04-13T17:51:30.566 INFO:tasks.cephadm.ceph_manager.ceph:need seq 107374182403 got 107374182403 for osd.7 2026-04-13T17:51:30.566 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:30.611 INFO:teuthology.orchestra.run.vm00.stdout:94489280515 2026-04-13T17:51:30.655 INFO:tasks.cephadm.ceph_manager.ceph:need seq 94489280515 got 94489280515 for osd.5 2026-04-13T17:51:30.655 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:30.690 INFO:teuthology.orchestra.run.vm00.stdout:68719476741 2026-04-13T17:51:30.758 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476741 got 68719476741 for osd.1 2026-04-13T17:51:30.758 DEBUG:teuthology.parallel:result is None 2026-04-13T17:51:30.758 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-04-13T17:51:30.759 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph pg dump --format=json 2026-04-13T17:51:30.874 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:30 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/4063932054' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-13T17:51:30.874 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:30 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/974814079' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-13T17:51:30.874 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:30 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/3485294293' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-13T17:51:30.874 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:30 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/2619171459' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-13T17:51:30.874 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:30 vm00 ceph-mon[51174]: from='client.? 192.168.123.100:0/1552740774' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-13T17:51:30.928 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:31.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:30 vm01 ceph-mon[56805]: from='client.? 
192.168.123.100:0/4063932054' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-13T17:51:31.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:30 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/974814079' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-13T17:51:31.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:30 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/3485294293' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-13T17:51:31.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:30 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/2619171459' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-13T17:51:31.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:30 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1552740774' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-13T17:51:31.292 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:31.292 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-04-13T17:51:31.341 INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":45,"stamp":"2026-04-13T17:51:30.283254+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":106,"num_read_kb":91,"num_write":189,"num_write_kb":3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":152,"ondisk_log_size":152,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":4,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":4,"kb":167706624,"kb_used":218732,"kb_used_data":3948,"kb_used_omap":70,"kb_used_meta":214457,"kb_avail":167487892,"statfs":{"total":171731582976,"available":171507601408,"internally_reserved":0,"allocated":4042752,"data_stored":2638496,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":72109,"internal_metadata":219604563},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":-590368,"num_objects":-2,"num_object_clones":0,"num_object_copies":-6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_object
s_unfound":0,"num_objects_dirty":-2,"num_whiteouts":0,"num_read":-106,"num_read_kb":-91,"num_write":-189,"num_write_kb":-3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"4.020244"},"pg_stats":[{"pgid":"1.0","version":"23'152","reported_seq":206,"reported_epoch":29,"state":"active+clean","last_fresh":"2026-04-13T17:51:25.847406+0000","last_change":"2026-04-13T17:51:25.847336+0000","last_active":"2026-04-13T17:51:25.847406+0000","last_peered":"2026-04-13T17:51:25.847406+0000","last_clean":"2026-04-13T17:51:25.847406+0000","last_became_active":"2026-04-13T17:51:25.847187+0000","last_became_peered":"2026-04-13T17:51:25.847187+0000","last_unstale":"2026-04-13T17:51:25.847406+0000","last_undegraded":"2026-04-13T17:51:25.847406+0000","last_fullsized":"2026-04-13T17:51:25.847406+0000","mapping_epoch":28,"log_start":"0'0","ondisk_log_start":"0'0","created":21,"last_epoch_clean":29,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-13T17:51:17.457454+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-13T17:51:17.457454+0000","last_clean_scrub_stamp":"2026-04-13T17:51:17.457454+0000","objects_scrubbed":0,"log_size":152,"log_dups_size":0,"ondisk_log_size":152,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 
2026-04-15T02:21:31.176854+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":106,"num_read_kb":91,"num_write":189,"num_write_kb":3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,2],"acting":[6,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":106,"num_read_kb":91,"num_write":189,"num_write_kb":3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1781760,"data_stored":1771104,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":152,"ondisk_log_size":152,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":25,"seq":107374182403,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6816,"internal_metadata":27452768},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":25,"seq":107374182403,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27704,"kb_used_data":856,"kb_used_omap":5,"kb_used_meta":26810,"kb_avail":20935624,"statfs":{"total":21466447872,"available":21438078976,"internally_reserved":0,"allocated":876544,"data_stored":698792,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_origina
l":0,"omap_allocated":5520,"internal_metadata":27454064},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":22,"seq":94489280516,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27704,"kb_used_data":856,"kb_used_omap":8,"kb_used_meta":26807,"kb_avail":20935624,"statfs":{"total":21466447872,"available":21438078976,"internally_reserved":0,"allocated":876544,"data_stored":698792,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8771,"internal_metadata":27450813},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":22,"seq":94489280516,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27452113},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":19,"seq":81604378628,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":10,"kb_used_meta":26805,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10719,"internal_metadata":27448865},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378628,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27704,"kb_used_data":856,"kb_used_omap":9,"kb_used_meta":26806,"kb_avail":20935624,"statfs":{"total":21466447872,"available":21438078976,"internally_reserved":0,"allocated":876544,"data_stored":698792,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10071,"internal_metadata":27449513},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":11,"kb_used_meta":26804,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_
reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":11371,"internal_metadata":27448213},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":11,"kb_used_meta":26804,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":11370,"internal_metadata":27448214},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-13T17:51:31.341 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph pg dump --format=json 2026-04-13T17:51:31.483 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:31.846 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:31.846 INFO:teuthology.orchestra.run.vm00.stderr:dumped all 2026-04-13T17:51:31.896 
INFO:teuthology.orchestra.run.vm00.stdout:{"pg_ready":true,"pg_map":{"version":45,"stamp":"2026-04-13T17:51:30.283254+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":106,"num_read_kb":91,"num_write":189,"num_write_kb":3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":152,"ondisk_log_size":152,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":4,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":4,"kb":167706624,"kb_used":218732,"kb_used_data":3948,"kb_used_omap":70,"kb_used_meta":214457,"kb_avail":167487892,"statfs":{"total":171731582976,"available":171507601408,"internally_reserved":0,"allocated":4042752,"data_stored":2638496,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":72109,"internal_metadata":219604563},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":-590368,"num_objects":-2,"num_object_clones":0,"num_object_copies":-6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":-2,"num_whiteouts":0,"num_read":-106,"num_read_kb":-91,"num_write":-189,"num_write_kb":-3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"4.020244"},"pg_stats":[{"pgid":"1.0","version":"23'152","reported_seq":206,"reported_epoch":29,"state":"active+clean","last_fresh":"2026-04-13T17:51:25.847406+0000","last_change":"2026-04-13
T17:51:25.847336+0000","last_active":"2026-04-13T17:51:25.847406+0000","last_peered":"2026-04-13T17:51:25.847406+0000","last_clean":"2026-04-13T17:51:25.847406+0000","last_became_active":"2026-04-13T17:51:25.847187+0000","last_became_peered":"2026-04-13T17:51:25.847187+0000","last_unstale":"2026-04-13T17:51:25.847406+0000","last_undegraded":"2026-04-13T17:51:25.847406+0000","last_fullsized":"2026-04-13T17:51:25.847406+0000","mapping_epoch":28,"log_start":"0'0","ondisk_log_start":"0'0","created":21,"last_epoch_clean":29,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-13T17:51:17.457454+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-13T17:51:17.457454+0000","last_clean_scrub_stamp":"2026-04-13T17:51:17.457454+0000","objects_scrubbed":0,"log_size":152,"log_dups_size":0,"ondisk_log_size":152,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-04-15T02:21:31.176854+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":106,"num_read_kb":91,"num_write":189,"num_write_kb":3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[6,5,2],"acting":[6,5,2],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":6,"acting_primary":6,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":106,"num_read_kb":91,"num_write":189,"num_write_kb":3716,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":590368,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1781760,"data_stored":1771104,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":152,"ondisk_log_size":152,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up
_from":25,"seq":107374182403,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":6,"kb_used_meta":26809,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6816,"internal_metadata":27452768},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":25,"seq":107374182403,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27704,"kb_used_data":856,"kb_used_omap":5,"kb_used_meta":26810,"kb_avail":20935624,"statfs":{"total":21466447872,"available":21438078976,"internally_reserved":0,"allocated":876544,"data_stored":698792,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":5520,"internal_metadata":27454064},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":22,"seq":94489280516,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27704,"kb_used_data":856,"kb_used_omap":8,"kb_used_meta":26807,"kb_avail":20935624,"statfs":{"total":21466447872,"available":21438078976,"internally_reserved":0,"allocated":876544,"data_stored":698792,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8771,"internal_metadata":27450813},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":22,"seq":94489280516,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":7,"kb_used_meta":26808,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27452113},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":19,"seq":81604378628,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":10,"kb_used_meta":26805,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10719,"internal_metadata":27448865},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat"
:{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":19,"seq":81604378628,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27704,"kb_used_data":856,"kb_used_omap":9,"kb_used_meta":26806,"kb_avail":20935624,"statfs":{"total":21466447872,"available":21438078976,"internally_reserved":0,"allocated":876544,"data_stored":698792,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":10071,"internal_metadata":27449513},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":1,"apply_latency_ms":1,"commit_latency_ns":1000000,"apply_latency_ns":1000000},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":11,"kb_used_meta":26804,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":11371,"internal_metadata":27448213},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27124,"kb_used_data":276,"kb_used_omap":11,"kb_used_meta":26804,"kb_avail":20936204,"statfs":{"total":21466447872,"available":21438672896,"internally_reserved":0,"allocated":282624,"data_stored":108424,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":11370,"internal_metadata":27448214},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":2,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":5,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-13T17:51:31.896 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-04-13T17:51:31.896 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-04-13T17:51:31.896 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-04-13T17:51:31.896 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph health --format=json 2026-04-13T17:51:32.034 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:32.099 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:31 vm00 ceph-mon[51174]: pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:32.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:31 vm01 ceph-mon[56805]: pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:32.428 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:32.428 INFO:teuthology.orchestra.run.vm00.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-04-13T17:51:32.483 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-04-13T17:51:32.483 INFO:tasks.cephadm:Setup complete, yielding 2026-04-13T17:51:32.483 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-13T17:51:32.485 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-04-13T17:51:32.485 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch status' 2026-04-13T17:51:32.618 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:33.006 INFO:teuthology.orchestra.run.vm00.stdout:Backend: cephadm 2026-04-13T17:51:33.006 INFO:teuthology.orchestra.run.vm00.stdout:Available: Yes 2026-04-13T17:51:33.006 INFO:teuthology.orchestra.run.vm00.stdout:Paused: No 2026-04-13T17:51:33.006 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:32 vm00 ceph-mon[51174]: from='client.14546 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:33.006 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:32 vm00 ceph-mon[51174]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:33.006 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:32 vm00 ceph-mon[51174]: from='client.? 
192.168.123.100:0/1431702997' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-13T17:51:33.079 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch ps' 2026-04-13T17:51:33.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:32 vm01 ceph-mon[56805]: from='client.14546 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:33.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:32 vm01 ceph-mon[56805]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:33.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:32 vm01 ceph-mon[56805]: from='client.? 192.168.123.100:0/1431702997' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-13T17:51:33.223 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:33.597 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-13T17:51:33.597 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.vm00 vm00 *:9093,9094 running (40s) 12s ago 83s 18.9M - 0.28.1 91c01b3cec9b 99ec598f58d9 2026-04-13T17:51:33.597 INFO:teuthology.orchestra.run.vm00.stdout:ceph-exporter.vm00 vm00 *:9926 running (89s) 12s ago 89s 8875k - 20.2.0-18-g0d1a6d86d0e 06443d8796ac a98ff996eb04 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:ceph-exporter.vm01 vm01 *:9926 running (53s) 12s ago 53s 9177k - 20.2.0-18-g0d1a6d86d0e 06443d8796ac 4fb0b514f74e 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm00 vm00 running (89s) 12s ago 89s 11.1M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac fca81a915cde 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm01 vm01 running (52s) 12s ago 52s 11.1M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac 00191dd48203 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:grafana.vm00 vm00 *:3000 running (38s) 12s ago 75s 130M - 12.2.0 1849e2140421 45bcd4dd1118 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm00.vrvkmc vm00 *:9283,8765,8443 running (2m) 12s ago 2m 546M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac 1ac94c425331 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm01.qjjyaa vm01 *:8443,9283,8765 running (49s) 12s ago 49s 477M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac f79d430c3377 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm00 vm00 running (2m) 12s ago 2m 49.3M 2048M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 7acf6113c4c4 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm01 vm01 running (48s) 12s ago 48s 46.1M 2048M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 72e8ac9a9f7a 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm00 vm00 *:9100 running (86s) 12s ago 86s 12.7M - 1.9.1 255ec253085f be29437e5eaf 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm01 vm01 *:9100 running (50s) 12s ago 50s 10.8M - 1.9.1 255ec253085f f0cf9a6fc26d 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm01 running (24s) 12s 
ago 24s 46.7M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 986e9e5514c1 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (24s) 12s ago 24s 55.2M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac c9abab2dd234 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm01 running (21s) 12s ago 21s 46.8M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac f0320e4aab00 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (21s) 12s ago 21s 55.6M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac c9e224da41c8 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm01 running (18s) 12s ago 18s 32.5M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 2fbffda3bca8 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm00 running (18s) 12s ago 18s 68.9M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 6456e4ba4a24 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm01 running (15s) 12s ago 15s 33.2M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac a56f49b5bd18 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm00 running (15s) 12s ago 15s 30.1M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac a35ec14bde10 2026-04-13T17:51:33.598 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.vm00 vm00 *:9095 running (37s) 12s ago 70s 33.9M - 3.6.0 4fcecf061b74 1e553463bde9 2026-04-13T17:51:33.660 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch ls' 2026-04-13T17:51:33.797 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:34.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:33 vm01 ceph-mon[56805]: pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:34.140 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:33 vm01 ceph-mon[56805]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:34.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:33 vm00 ceph-mon[51174]: pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:34.200 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:33 vm00 ceph-mon[51174]: from='client.14558 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager ?:9093,9094 1/1 12s ago 110s count:1 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:ceph-exporter ?:9926 2/2 13s ago 111s * 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:crash 2/2 13s ago 112s * 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:grafana ?:3000 1/1 12s ago 111s count:1 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:mgr 2/2 13s ago 112s count:2 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:mon 2/2 13s ago 86s vm00:192.168.123.100=vm00;vm01:192.168.123.101=vm01;count:2 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter ?:9100 2/2 13s ago 110s * 
2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:osd.all-available-devices 8 13s ago 38s * 2026-04-13T17:51:34.200 INFO:teuthology.orchestra.run.vm00.stdout:prometheus ?:9095 1/1 12s ago 111s count:1 2026-04-13T17:51:34.250 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch host ls' 2026-04-13T17:51:34.385 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:34.775 INFO:teuthology.orchestra.run.vm00.stdout:HOST ADDR LABELS STATUS 2026-04-13T17:51:34.775 INFO:teuthology.orchestra.run.vm00.stdout:vm00 192.168.123.100 2026-04-13T17:51:34.775 INFO:teuthology.orchestra.run.vm00.stdout:vm01 192.168.123.101 2026-04-13T17:51:34.775 INFO:teuthology.orchestra.run.vm00.stdout:2 hosts in cluster 2026-04-13T17:51:34.821 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch device ls' 2026-04-13T17:51:34.958 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:35.025 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:34 vm00 ceph-mon[51174]: from='client.14562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:35.025 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:34 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:35.328 INFO:teuthology.orchestra.run.vm00.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-13T17:51:35.328 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme0n1 hdd Linux_8b2c1f5b0b2f0e200b07 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.328 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme1n1 hdd Linux_3341fccde0b3848ce18b 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.328 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme2n1 hdd Linux_26e9e0aced76c789d80f 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.328 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme3n1 hdd Linux_c2bf6a0108b89cd03010 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.328 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 12s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdb hdd DWNBRSTVMM00001 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdc hdd DWNBRSTVMM00002 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdd hdd 
DWNBRSTVMM00003 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vde hdd DWNBRSTVMM00004 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme0n1 hdd Linux_9e712741119b9c9a8422 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme1n1 hdd Linux_707733fc50b1c23686f1 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme2n1 hdd Linux_3680d36b7ca865845e0b 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme3n1 hdd Linux_1c49275885a38132c982 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 12s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.329 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-13T17:51:35.386 INFO:teuthology.run_tasks:Running task cephadm.shell... 
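The cephadm.shell tasks in this run (the inventory commands above and the apply step that follows) all execute through `cephadm shell` with the admin conf and keyring, exactly as the DEBUG lines show. A sketch of that invocation under the same paths seen in the log; the helper name cephadm_shell_bash() is hypothetical:

    def cephadm_shell_bash(script):
        # Mirrors: sudo /home/ubuntu/cephtest/cephadm --image <IMAGE> shell
        #   -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
        #   --fsid <FSID> -- bash -c '<script>'
        proc = subprocess.run(
            ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
             "-c", "/etc/ceph/ceph.conf",
             "-k", "/etc/ceph/ceph.client.admin.keyring",
             "--fsid", FSID, "--", "bash", "-c", script],
            check=True, capture_output=True, text=True)
        return proc.stdout

For example, cephadm_shell_bash("ceph orch apply jaeger") would reproduce the apply step logged next, which schedules the elasticsearch, jaeger-collector, jaeger-query, and jaeger-agent updates.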
2026-04-13T17:51:35.388 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-04-13T17:51:35.388 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch apply jaeger' 2026-04-13T17:51:35.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:34 vm01 ceph-mon[56805]: from='client.14562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:35.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:34 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:35.533 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:35.907 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:35 vm00 ceph-mon[51174]: from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:35.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:35 vm00 ceph-mon[51174]: pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:35.908 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:35 vm00 ceph-mon[51174]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:35.929 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled elasticsearch update... 2026-04-13T17:51:35.929 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled jaeger-collector update... 2026-04-13T17:51:35.929 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled jaeger-query update... 2026-04-13T17:51:35.930 INFO:teuthology.orchestra.run.vm00.stdout:Scheduled jaeger-agent update... 2026-04-13T17:51:35.987 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-13T17:51:35.990 INFO:tasks.cephadm:Waiting for ceph service elasticsearch to start (timeout 300)... 
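cephadm.wait_for_service polls `ceph orch ls -f json` until the named service reports as many running daemons as its target size, which is why the JSON below still shows elasticsearch with "running": 0 of "size": 1 and the task logs "elasticsearch has 0/1". A minimal sketch of that loop, reusing the run_cephadm_shell() helper assumed above:

    def wait_for_service(service_name, timeout=300, interval=2):
        # Done once status.running == status.size for the service,
        # matching the "<service> has R/S" progress lines in the log.
        deadline = time.time() + timeout
        while time.time() < deadline:
            services = json.loads(run_cephadm_shell("ceph", "orch", "ls", "-f", "json"))
            for svc in services:
                if svc.get("service_name") == service_name:
                    status = svc.get("status", {})
                    if status.get("running", 0) >= status.get("size", 1):
                        return
            time.sleep(interval)
        raise TimeoutError(f"service {service_name} did not start in {timeout}s")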
2026-04-13T17:51:35.990 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:36.135 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:36.168 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:35 vm01 ceph-mon[56805]: from='client.14566 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:36.168 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:35 vm01 ceph-mon[56805]: pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:36.168 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:35 vm01 ceph-mon[56805]: from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:36.514 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:36.514 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:21.572242Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:21.006887Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:21.006953Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:21.572281Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:35.925141Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": 
"2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:21.007038Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:21.007077Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:21.006998Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:21.007128Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:21.572314Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:51:36.585 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch apply jaeger", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: Saving service elasticsearch spec with placement count:1 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: Saving service jaeger-collector spec with placement count:1 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: Saving service jaeger-query spec with placement count:1 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 
192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: Saving service jaeger-agent spec with placement * 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.054 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch apply jaeger", "target": ["mon-mgr", ""]}]: dispatch 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: Saving service elasticsearch spec with placement count:1 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: Saving service jaeger-collector spec with placement count:1 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: Saving service jaeger-query spec with placement count:1 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: Saving service jaeger-agent spec with placement * 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:37.585 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:37.738 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config 
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:38.188 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-13T17:51:38.223 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:38.223 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:38.223 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:38.223 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:38.223 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-13T17:51:38.223 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-13T17:51:38.224 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:38.224 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-13T17:51:38.233 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:39.233 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:39.256 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:38 vm00 ceph-mon[51174]: Deploying daemon jaeger-agent.vm01 on vm01
2026-04-13T17:51:39.370 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:39.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:38 vm01 ceph-mon[56805]: Deploying daemon jaeger-agent.vm01 on vm01
2026-04-13T17:51:39.799 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:40.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:39 vm00 ceph-mon[51174]: from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:40.077 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:39 vm00 ceph-mon[51174]: pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:40.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:39 vm01 ceph-mon[56805]: from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:40.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:39 vm01 ceph-mon[56805]: pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:40.801 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:40.949 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:41.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:40 vm00 ceph-mon[51174]: from='client.14590 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:41.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:40 vm01 ceph-mon[56805]: from='client.14590 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:41.408 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:42.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:41 vm00 ceph-mon[51174]: pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:42.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:41 vm01 ceph-mon[56805]: pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:42.409 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:42.543 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:42.985 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:43.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:42 vm00 ceph-mon[51174]: from='client.14594 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:43.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:42 vm01 ceph-mon[56805]: from='client.14594 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:43.985 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:44.110 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:44.228 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:43 vm00 ceph-mon[51174]: pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:44.228 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:43 vm00 ceph-mon[51174]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:44.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:43 vm01 ceph-mon[56805]: pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:44.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:43 vm01 ceph-mon[56805]: from='client.14598 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:44.535 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:45.536 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:45.671 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:46.094 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:46.218 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:45 vm01 ceph-mon[56805]: pgmap v52: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:46.218 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:45 vm01 ceph-mon[56805]: from='client.14602 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:46.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:45 vm00 ceph-mon[51174]: pgmap v52: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:46.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:45 vm00 ceph-mon[51174]: from='client.14602 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:47.095 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:47.230 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:47.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:47 vm00 ceph-mon[51174]: from='client.14606 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:47.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:47 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:47.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:47 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:47.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:47 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:47.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:47 vm00 ceph-mon[51174]: Deploying daemon jaeger-agent.vm00 on vm00
2026-04-13T17:51:47.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:47 vm00 ceph-mon[51174]: pgmap v53: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:47.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:47 vm01 ceph-mon[56805]: from='client.14606 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:47.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:47 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:47.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:47 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:47.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:47 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:47.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:47 vm01 ceph-mon[56805]: Deploying daemon jaeger-agent.vm00 on vm00
2026-04-13T17:51:47.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:47 vm01 ceph-mon[56805]: pgmap v53: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:47.667 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:48.425 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:48 vm00 ceph-mon[51174]: from='client.14610 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:48.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:48 vm01 ceph-mon[56805]: from='client.14610 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:48.668 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:48.816 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:49.275 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:49.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:49 vm00 ceph-mon[51174]: pgmap v54: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:49.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:49 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:49.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:49 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:49.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:49 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:49.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:49 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc'
2026-04-13T17:51:49.509 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:49 vm00 ceph-mon[51174]: Deploying daemon elasticsearch.vm01 on vm01
2026-04-13T17:51:49.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:49 vm01 ceph-mon[56805]: pgmap v54: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:49.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:49 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:49.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:49 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:49.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:49 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:49.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:49 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:49.891 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:49 vm01 ceph-mon[56805]: Deploying daemon elasticsearch.vm01 on vm01 2026-04-13T17:51:50.276 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:50.403 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:50.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:50 vm00 ceph-mon[51174]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:50.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:50.576 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:50 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:50.751 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:50.751 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": 
"2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:51:50.804 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:51:50.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:50 vm01 ceph-mon[56805]: from='client.14614 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:50.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:51:50.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:50 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:51:51.804 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image 
harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:51.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:51 vm00 ceph-mon[51174]: pgmap v55: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:51.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:51 vm00 ceph-mon[51174]: from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:51.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:51 vm01 ceph-mon[56805]: pgmap v55: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:51.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:51 vm01 ceph-mon[56805]: from='client.14618 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:51.932 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:52.301 INFO:teuthology.orchestra.run.vm00.stdout:
2026-04-13T17:51:52.301 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}]
2026-04-13T17:51:52.349 INFO:tasks.cephadm:elasticsearch has 0/1
2026-04-13T17:51:53.349 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:51:53.485 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:51:53.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:53 vm00 ceph-mon[51174]: pgmap v56: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:51:53.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:53 vm00 ceph-mon[51174]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:51:53.850 INFO:teuthology.orchestra.run.vm00.stdout:
"2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": 
"2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:51:53.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:53 vm01 ceph-mon[56805]: pgmap v56: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:53.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:53 vm01 ceph-mon[56805]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:53.899 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:51:54.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:54 vm00 ceph-mon[51174]: from='client.14626 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:54.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:54 vm01 ceph-mon[56805]: from='client.14626 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:54.899 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:55.028 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:55.379 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:55.379 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service 
was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:51:55.428 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:51:55.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:55 vm00 ceph-mon[51174]: pgmap v57: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:55.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:55 vm01 ceph-mon[56805]: pgmap v57: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:56.428 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:56.568 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:56.648 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:56 vm01 ceph-mon[56805]: from='client.14630 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:56.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:56 vm00 ceph-mon[51174]: from='client.14630 -' 
entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:56.936 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:56.936 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": 
"2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:51:56.986 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:51:57.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:57 vm00 ceph-mon[51174]: pgmap v58: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:57.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:57 vm00 ceph-mon[51174]: from='client.14634 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:57.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:57 vm01 ceph-mon[56805]: pgmap v58: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:57.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:57 vm01 ceph-mon[56805]: from='client.14634 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:57.988 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:58.130 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:51:58.498 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:51:58.498 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": 
"2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:51:58.561 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:51:59.562 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:51:59.695 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 
2026-04-13T17:51:59.812 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:59 vm00 ceph-mon[51174]: pgmap v59: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:59.812 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:51:59 vm00 ceph-mon[51174]: from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:51:59.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:59 vm01 ceph-mon[56805]: pgmap v59: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:51:59.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:51:59 vm01 ceph-mon[56805]: from='client.14638 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:00.042 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:00.043 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": 
"2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:00.114 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:01.115 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:01.247 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:01.556 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:01 vm00 ceph-mon[51174]: from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:01.557 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:01 vm00 ceph-mon[51174]: pgmap v60: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:01.593 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:01.593 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", 
"last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:01.642 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:01.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:01 vm01 ceph-mon[56805]: from='client.14642 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": 
["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:01.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:01 vm01 ceph-mon[56805]: pgmap v60: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:02.642 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:02.774 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:02.812 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:02 vm00 ceph-mon[51174]: from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:02.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:02 vm01 ceph-mon[56805]: from='client.14646 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:03.124 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:03.124 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", 
"service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:03.170 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:03.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:03 vm00 ceph-mon[51174]: pgmap v61: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:03.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:03 vm01 ceph-mon[56805]: pgmap v61: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:04.171 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:04.303 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:04.659 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:04.659 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", 
"ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], 
"running": 1, "size": 1}}] 2026-04-13T17:52:04.707 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:04.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:04 vm00 ceph-mon[51174]: from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:04.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:04 vm01 ceph-mon[56805]: from='client.14650 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:05.708 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:05.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:05 vm00 ceph-mon[51174]: pgmap v62: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:05.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:05 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:52:05.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:05 vm00 ceph-mon[51174]: from='client.14654 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:05.830 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:05.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:05 vm01 ceph-mon[56805]: pgmap v62: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:05.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:05 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:52:05.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:05 vm01 ceph-mon[56805]: from='client.14654 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:06.170 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:06.170 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": 
"elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:06.214 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:07.214 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:07.334 INFO:teuthology.orchestra.run.vm00.stderr:Inferring 
config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:07.688 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:07.688 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], 
"running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:07.733 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:07.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:07 vm01 ceph-mon[56805]: from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:07.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:07 vm01 ceph-mon[56805]: pgmap v63: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:08.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:07 vm00 ceph-mon[51174]: from='client.14658 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:08.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:07 vm00 ceph-mon[51174]: pgmap v63: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:08.733 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:08.865 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:08.888 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:08 vm00 ceph-mon[51174]: from='client.14662 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:08.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:08 vm01 ceph-mon[56805]: from='client.14662 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:09.229 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:09.229 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, 
"service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:09.288 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:09.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:09 
vm00 ceph-mon[51174]: pgmap v64: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:09.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:09 vm01 ceph-mon[56805]: pgmap v64: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:10.289 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:10.416 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:10.771 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:10.771 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, 
"size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:10.814 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:10.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:10 vm01 ceph-mon[56805]: from='client.14666 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:11.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:10 vm00 ceph-mon[51174]: from='client.14666 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:11.816 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:11.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:11 vm01 ceph-mon[56805]: pgmap v65: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:11.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:11 vm01 ceph-mon[56805]: from='client.14670 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:11.944 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:11.965 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:11 vm00 ceph-mon[51174]: pgmap v65: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:11.965 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:11 vm00 ceph-mon[51174]: from='client.14670 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:12.292 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:12.293 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", 
"service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": 
"osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:12.339 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:13.340 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:13.475 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:13.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:13 vm00 ceph-mon[51174]: from='client.14674 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:13.827 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:13 vm00 ceph-mon[51174]: pgmap v66: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:13.851 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:13.851 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": 
{"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:13.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:13 vm01 ceph-mon[56805]: from='client.14674 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:13.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:13 vm01 ceph-mon[56805]: pgmap v66: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:13.904 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:14.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:14 vm01 ceph-mon[56805]: from='client.14678 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:14.904 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:14.925 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:14 vm00 ceph-mon[51174]: from='client.14678 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:15.029 
INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:15.382 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:15.382 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": 
"2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:15.451 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:15.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:15 vm00 ceph-mon[51174]: pgmap v67: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:15.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:15 vm01 ceph-mon[56805]: pgmap v67: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:16.452 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:16.582 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:16.826 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:16 vm00 ceph-mon[51174]: from='client.14682 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:16.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:16 vm01 ceph-mon[56805]: from='client.14682 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:16.918 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:16.918 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": 
"2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:16.966 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:17.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:17 vm01 ceph-mon[56805]: pgmap v68: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:17.890 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:17 vm01 ceph-mon[56805]: from='client.14686 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:17.966 
DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:17.990 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:17 vm00 ceph-mon[51174]: pgmap v68: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:17.990 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:17 vm00 ceph-mon[51174]: from='client.14686 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:18.097 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:18.466 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:18.466 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": 
{"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:18.525 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:19.526 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:19.681 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:19.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:19 vm00 ceph-mon[51174]: pgmap v69: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:19.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:19 vm00 ceph-mon[51174]: from='client.14690 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:19.805 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:19 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:52:20.044 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:19 vm01 ceph-mon[56805]: pgmap v69: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:20.044 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:19 vm01 ceph-mon[56805]: from='client.14690 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:20.044 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:19 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-13T17:52:20.045 INFO:teuthology.orchestra.run.vm00.stdout: 
2026-04-13T17:52:20.045 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:51:35.902694Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": 
{"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:20.093 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:21.094 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:21.233 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: from='client.14694 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: Deploying daemon jaeger-collector.vm00 on vm00 2026-04-13T17:52:21.440 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: pgmap v70: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:21.441 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:21 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: from='client.14694 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: from='mgr.14227 
192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: Deploying daemon jaeger-collector.vm00 on vm00 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: pgmap v70: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:21.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:21 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:21.614 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:21.614 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:52:20.176276Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, 
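At 17:52:21 the orchestrator finally acts on the pending spec: the mon audit lines above record "Deploying daemon jaeger-collector.vm00 on vm00", and the next `orch ls` payload (below) differs from every earlier one in a single field, the elasticsearch "service was created" event re-stamped 2026-04-13T17:52:20.176276Z while running stays 0/1. A small, hypothetical helper for surfacing such changes between consecutive polls (plain json diffing; not part of teuthology or cephadm):

    import json

    def changed_services(prev_payload: str, curr_payload: str) -> dict:
        """Map service_name -> (previous entry, current entry) for entries that differ."""
        prev = {s["service_name"]: s for s in json.loads(prev_payload)}
        curr = {s["service_name"]: s for s in json.loads(curr_payload)}
        return {name: (prev.get(name), entry)
                for name, entry in curr.items()
                if prev.get(name) != entry}

    # Applied to the payloads before and after 17:52:20, this would return
    # only the "elasticsearch" entry, whose events list was re-stamped.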
"service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:21.688 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:22.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:22 vm00 ceph-mon[51174]: from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:22.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:22 vm01 ceph-mon[56805]: from='client.14698 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:22.688 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:22.822 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:23.197 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:23.197 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:51:37.372303Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:51:36.725081Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:51:36.725134Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:52:20.176276Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": 
"elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "ports": [9200], "running": 0, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:51:37.372368Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:51:35.914805Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "ports": [14250], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:51:35.920499Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "ports": [16686], "running": 0, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:51:36.725201Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:51:36.725231Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:36.725169Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:36.725260Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:23.243 INFO:tasks.cephadm:elasticsearch has 0/1 2026-04-13T17:52:23.500 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:23 vm01 ceph-mon[56805]: pgmap v71: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:23.577 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:23 vm00 ceph-mon[51174]: pgmap v71: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:24.244 
2026-04-13T17:52:24.244 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:52:24.372 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:24.488 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:24 vm00 ceph-mon[51174]: from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:52:24.640 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:24 vm01 ceph-mon[56805]: from='client.14702 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-13T17:52:24.743 INFO:teuthology.orchestra.run.vm00.stdout:[... full `ceph orch ls -f json` dump, unchanged from the previous poll: elasticsearch 0/1, jaeger-agent 0/2, jaeger-collector 0/1, jaeger-query 0/1, all other services at full size ...]
2026-04-13T17:52:24.794 INFO:tasks.cephadm:elasticsearch has 0/1
[... five more poll cycles of the same shape at 17:52:25.795, 17:52:27.366, 17:52:28.871, 17:52:30.381 and 17:52:31.905, each ending in "INFO:tasks.cephadm:elasticsearch has 0/1" (17:52:26.365, 17:52:27.870, 17:52:29.380, 17:52:30.904, 17:52:32.398); the dumps differ only in refreshed event timestamps, e.g. jaeger-collector re-reports "service was created" at 17:52:26.083956Z. Interleaved mon records on both hosts: pgmap v72-v75, the per-poll client.14706-client.14722 "orch ls" dispatches, and bursts of mgr.vm00.vrvkmc entries ...]
2026-04-13T17:52:27.327 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:27 vm00 ceph-mon[51174]: Deploying daemon jaeger-query.vm01 on vm01
2026-04-13T17:52:33.400 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:52:33.541 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:33.909 INFO:teuthology.orchestra.run.vm00.stdout:[... dump now shows elasticsearch "running": 1 (last_refresh 2026-04-13T17:52:33.863204Z) and jaeger-query "running": 1 ("service was created" at 17:52:32.934776Z); jaeger-collector and jaeger-agent are still at 0 ...]
2026-04-13T17:52:33.986 INFO:tasks.cephadm:elasticsearch has 1/1
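The cycle that repeats above (run `ceph orch ls -f json` through the cephadm shell, parse the array, log the running/size ratio, retry until the counts match) is the body of the cephadm.wait_for_service task, bounded by the 300-second cap announced in its "timeout 300" lines. A sketch of that loop under stated assumptions: the subprocess call mirrors the DEBUG lines, but the helper names, error handling and 1-second sleep are guesses, not teuthology's implementation (the log shows polls roughly 1.5 s apart).

import json
import subprocess
import time

def orch_ls(image: str, fsid: str) -> list:
    # Mirrors the DEBUG lines above: run `ceph orch ls -f json` inside the
    # cephadm shell using the cluster's admin keyring.
    out = subprocess.check_output([
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", image, "shell",
        "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", fsid,
        "--", "ceph", "orch", "ls", "-f", "json",
    ])
    return json.loads(out)

def wait_for_service(service: str, image: str, fsid: str, timeout: int = 300) -> None:
    # Poll until the named service reports as many running daemons as its
    # placement asks for ("size"), or give up after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        for svc in orch_ls(image, fsid):
            if svc["service_name"] != service:
                continue
            status = svc["status"]
            print(f"{service} has {status['running']}/{status['size']}")
            if status["running"] >= status["size"]:
                return
        time.sleep(1)  # assumed poll interval, not teuthology's actual value
    raise TimeoutError(f"service {service} did not start within {timeout}s")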
"running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:51:37.372253Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:51:37.372470Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:51:37.372420Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:33.986 INFO:tasks.cephadm:elasticsearch has 1/1 2026-04-13T17:52:33.986 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-13T17:52:33.989 INFO:tasks.cephadm:Waiting for ceph service jaeger-collector to start (timeout 300)... 2026-04-13T17:52:33.989 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: pgmap v76: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='client.14726 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.016 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:33 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.120 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config 
/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: pgmap v76: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='client.14726 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:33 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:34.545 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:34.545 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:52:34.468240Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:52:33.862916Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:52:33.862964Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:52:20.176276Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "last_refresh": "2026-04-13T17:52:33.863204Z", "ports": [9200], "running": 1, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:52:34.468281Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], 
"placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "last_refresh": "2026-04-13T17:52:33.863178Z", "ports": [6799], "running": 0, "size": 2}}, {"events": ["2026-04-13T17:52:26.083956Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "last_refresh": "2026-04-13T17:52:34.468470Z", "ports": [14250], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:52:32.934776Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "last_refresh": "2026-04-13T17:52:33.863231Z", "ports": [16686], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:52:33.863026Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:52:33.863055Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:52:33.862996Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:52:33.863082Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:52:34.468312Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:34.596 INFO:tasks.cephadm:jaeger-collector has 1/1 2026-04-13T17:52:34.596 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-13T17:52:34.598 INFO:tasks.cephadm:Waiting for ceph service jaeger-query to start (timeout 300)... 
2026-04-13T17:52:34.598 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:52:34.764 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:35.198 INFO:journalctl@ceph.mon.vm00.vm00.stdout:[... the client.14730 "orch ls" dispatch plus mgr.vm00.vrvkmc records: cmd={"prefix": "config generate-minimal-conf"}, cmd={"prefix": "auth get", "entity": "client.admin"}, cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} and cmd={"prefix": "osd blocklist ls", "format": "json"}, each ": dispatch" ...]
2026-04-13T17:52:35.215 INFO:teuthology.orchestra.run.vm00.stdout:[... dump unchanged apart from refreshed timestamps: jaeger-query 1/1, jaeger-collector 1/1, jaeger-agent still 0/2 ...]
2026-04-13T17:52:35.264 INFO:tasks.cephadm:jaeger-query has 1/1
2026-04-13T17:52:35.264 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service...
2026-04-13T17:52:35.274 INFO:tasks.cephadm:Waiting for ceph service jaeger-agent to start (timeout 300)...
2026-04-13T17:52:35.274 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:52:35.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:[... vm01's mon echoes the client.14730 dispatch and the same mgr command records ...]
2026-04-13T17:52:35.401 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:35.744 INFO:teuthology.orchestra.run.vm00.stdout:[... dump unchanged: jaeger-agent "running": 0, "size": 2 ...]
2026-04-13T17:52:35.788 INFO:tasks.cephadm:jaeger-agent has 0/2
2026-04-13T17:52:36.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:35 vm00 ceph-mon[51174]: Reconfiguring jaeger-agent.vm00 deps [] -> ['vm00:14250'] (diff {'vm00:14250'})
2026-04-13T17:52:36.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:35 vm00 ceph-mon[51174]: Deploying daemon jaeger-agent.vm00 on vm00
2026-04-13T17:52:36.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:35 vm00 ceph-mon[51174]: Health check failed: 2 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
[... the same vm00 burst also carries pgmap v77/v78, the client.14734 "orch ls" dispatch and three mgr.vm00.vrvkmc records; vm01's mon echoes all of it at 17:52:36.140 ...]
2026-04-13T17:52:36.789 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json
2026-04-13T17:52:36.914 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='client.14738 -' entity='client.admin' cmd=[{"prefix":
"orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: Reconfiguring jaeger-agent.vm01 deps [] -> ['vm00:14250'] (diff {'vm00:14250'}) 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: Deploying daemon jaeger-agent.vm01 on vm01 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='client.14742 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.080 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:36 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.281 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:37.281 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:52:34.468240Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:52:34.468149Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:52:34.468179Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:52:20.176276Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "last_refresh": "2026-04-13T17:52:36.795573Z", "ports": [9200], "running": 1, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:52:34.468281Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "last_refresh": "2026-04-13T17:52:36.795545Z", "ports": 
[6799], "running": 1, "size": 2}}, {"events": ["2026-04-13T17:52:26.083956Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "last_refresh": "2026-04-13T17:52:34.468470Z", "ports": [14250], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:52:32.934776Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "last_refresh": "2026-04-13T17:52:36.795601Z", "ports": [16686], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:52:34.468117Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:52:34.468066Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:52:34.468208Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:52:34.468342Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:52:34.468312Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:37.336 INFO:tasks.cephadm:jaeger-agent has 1/2 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: Reconfiguring jaeger-agent.vm01 deps [] -> ['vm00:14250'] (diff {'vm00:14250'}) 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: Deploying daemon jaeger-agent.vm01 on vm01 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='client.14742 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.390 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:37.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:36 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: pgmap v79: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.326 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:37 vm00 ceph-mon[51174]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-13T17:52:38.337 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph orch ls -f json 2026-04-13T17:52:38.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: pgmap v79: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:38.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-13T17:52:38.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-13T17:52:38.390 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' 2026-04-13T17:52:38.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:37 vm01 ceph-mon[56805]: from='mgr.14227 192.168.123.100:0/1339809952' entity='mgr.vm00.vrvkmc' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-13T17:52:38.456 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:38.787 INFO:teuthology.orchestra.run.vm00.stdout: 2026-04-13T17:52:38.787 INFO:teuthology.orchestra.run.vm00.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-13T17:49:44.070607Z", "last_refresh": "2026-04-13T17:52:37.331366Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:40.116601Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-13T17:49:42.207552Z", "last_refresh": "2026-04-13T17:52:36.795253Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:40.979428Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-13T17:49:41.742384Z", "last_refresh": "2026-04-13T17:52:36.795308Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:52:20.176276Z service:elasticsearch [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "elasticsearch", "service_type": "elasticsearch", "status": {"created": "2026-04-13T17:51:35.897243Z", "last_refresh": "2026-04-13T17:52:36.795573Z", "ports": [9200], "running": 1, "size": 1}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-13T17:49:43.156914Z", "last_refresh": "2026-04-13T17:52:37.331408Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:51:48.520945Z service:jaeger-agent [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "jaeger-agent", "service_type": "jaeger-agent", "status": {"created": "2026-04-13T17:51:35.920905Z", "last_refresh": "2026-04-13T17:52:36.795545Z", "ports": [6799], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:52:26.083956Z service:jaeger-collector [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-collector", "service_type": "jaeger-collector", "status": {"created": "2026-04-13T17:51:35.903318Z", "last_refresh": "2026-04-13T17:52:37.331650Z", "ports": [14250], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:52:32.934776Z service:jaeger-query [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "jaeger-query", "service_type": "jaeger-query", "status": {"created": "2026-04-13T17:51:35.915340Z", "last_refresh": "2026-04-13T17:52:36.795601Z", "ports": [16686], "running": 1, "size": 1}}, {"events": ["2026-04-13T17:50:44.279351Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-13T17:49:41.310813Z", "last_refresh": "2026-04-13T17:52:36.795369Z", "running": 2, "size": 2}}, {"events": 
["2026-04-13T17:50:45.279637Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm00:192.168.123.100=vm00", "vm01:192.168.123.101=vm01"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-13T17:50:07.530669Z", "last_refresh": "2026-04-13T17:52:36.795396Z", "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:43.390686Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-13T17:49:43.632044Z", "last_refresh": "2026-04-13T17:52:36.795340Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-13T17:50:55.707419Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-13T17:50:55.702316Z", "last_refresh": "2026-04-13T17:52:36.795424Z", "running": 8, "size": 8}}, {"events": ["2026-04-13T17:50:45.283936Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-13T17:49:42.689558Z", "last_refresh": "2026-04-13T17:52:37.331440Z", "ports": [9095], "running": 1, "size": 1}}] 2026-04-13T17:52:38.850 INFO:tasks.cephadm:jaeger-agent has 2/2 2026-04-13T17:52:38.850 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-13T17:52:38.852 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm00.local 2026-04-13T17:52:38.852 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"'' 2026-04-13T17:52:38.979 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:39.038 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:38 vm00 ceph-mon[51174]: from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:39.038 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:38 vm00 ceph-mon[51174]: pgmap v80: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:39.038 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:38 vm00 ceph-mon[51174]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 2 failed cephadm daemon(s)) 2026-04-13T17:52:39.038 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:38 vm00 ceph-mon[51174]: Cluster is now healthy 2026-04-13T17:52:39.043 INFO:teuthology.orchestra.run.vm00.stdout:167 167 2026-04-13T17:52:39.079 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch status' 2026-04-13T17:52:39.200 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:39.390 
INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:38 vm01 ceph-mon[56805]: from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:39.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:38 vm01 ceph-mon[56805]: pgmap v80: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-13T17:52:39.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:38 vm01 ceph-mon[56805]: Health check cleared: CEPHADM_FAILED_DAEMON (was: 2 failed cephadm daemon(s)) 2026-04-13T17:52:39.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:38 vm01 ceph-mon[56805]: Cluster is now healthy 2026-04-13T17:52:39.542 INFO:teuthology.orchestra.run.vm00.stdout:Backend: cephadm 2026-04-13T17:52:39.542 INFO:teuthology.orchestra.run.vm00.stdout:Available: Yes 2026-04-13T17:52:39.542 INFO:teuthology.orchestra.run.vm00.stdout:Paused: No 2026-04-13T17:52:39.663 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch ps' 2026-04-13T17:52:39.779 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:40.096 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:39 vm00 ceph-mon[51174]: from='client.24431 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager.vm00 vm00 *:9093,9094 running (106s) 2s ago 2m 19.0M - 0.28.1 91c01b3cec9b 99ec598f58d9 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:ceph-exporter.vm00 vm00 *:9926 running (2m) 2s ago 2m 9239k - 20.2.0-18-g0d1a6d86d0e 06443d8796ac a98ff996eb04 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:ceph-exporter.vm01 vm01 *:9926 running (2m) 3s ago 2m 9659k - 20.2.0-18-g0d1a6d86d0e 06443d8796ac 4fb0b514f74e 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm00 vm00 running (2m) 2s ago 2m 11.1M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac fca81a915cde 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:crash.vm01 vm01 running (119s) 3s ago 119s 11.1M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac 00191dd48203 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:elasticsearch.vm01 vm01 *:9200 running (20s) 3s ago 19s 1408M - 9a2652c5f453 c9ff47db0123 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:grafana.vm00 vm00 *:3000 running (105s) 2s ago 2m 131M - 12.2.0 1849e2140421 45bcd4dd1118 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-agent.vm00 vm00 *:6799 running (4s) 2s ago 51s 5620k - 9403e8d94e1c 77824eb16644 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-agent.vm01 vm01 *:6799 running (4s) 3s ago 53s 7843k - 9403e8d94e1c 07d80a49b632 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-collector.vm00 vm00 *:14250 running (14s) 2s ago 14s 10.3M - 2c18772d79b4 999bf7227ef0 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-query.vm01 vm01 *:16686 running (7s) 3s ago 7s 10.1M 
- 87c4704a9650 e0ab2ad83bce 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm00.vrvkmc vm00 *:9283,8765,8443 running (3m) 2s ago 3m 558M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac 1ac94c425331 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:mgr.vm01.qjjyaa vm01 *:8443,9283,8765 running (115s) 3s ago 115s 478M - 20.2.0-18-g0d1a6d86d0e 06443d8796ac f79d430c3377 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm00 vm00 running (3m) 2s ago 3m 55.0M 2048M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 7acf6113c4c4 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:mon.vm01 vm01 running (114s) 3s ago 114s 49.5M 2048M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 72e8ac9a9f7a 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm00 vm00 *:9100 running (2m) 2s ago 2m 13.3M - 1.9.1 255ec253085f be29437e5eaf 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter.vm01 vm01 *:9100 running (116s) 3s ago 116s 13.2M - 1.9.1 255ec253085f f0cf9a6fc26d 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.0 vm01 running (90s) 3s ago 90s 48.2M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 986e9e5514c1 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.1 vm00 running (90s) 2s ago 90s 62.8M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac c9abab2dd234 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.2 vm01 running (87s) 3s ago 87s 48.7M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac f0320e4aab00 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.3 vm00 running (87s) 2s ago 87s 62.5M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac c9e224da41c8 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.4 vm01 running (84s) 3s ago 84s 41.4M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 2fbffda3bca8 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.5 vm00 running (84s) 2s ago 84s 69.4M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac 6456e4ba4a24 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.6 vm01 running (81s) 3s ago 81s 63.4M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac a56f49b5bd18 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:osd.7 vm00 running (81s) 2s ago 81s 39.1M 4096M 20.2.0-18-g0d1a6d86d0e 06443d8796ac a35ec14bde10 2026-04-13T17:52:40.096 INFO:teuthology.orchestra.run.vm00.stdout:prometheus.vm00 vm00 *:9095 running (104s) 2s ago 2m 42.8M - 3.6.0 4fcecf061b74 1e553463bde9 2026-04-13T17:52:40.156 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch ls' 2026-04-13T17:52:40.270 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config 2026-04-13T17:52:40.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:39 vm01 ceph-mon[56805]: from='client.24431 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:alertmanager ?:9093,9094 1/1 3s ago 2m count:1 2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:ceph-exporter ?:9926 2/2 3s ago 2m * 
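[editor's note] The "jaeger-agent has 0/2" -> "1/2" -> "2/2" lines above come from a wait-for-service style poll: the task reruns ceph orch ls -f json until the service's "running" count reaches its "size". A minimal sketch of that kind of loop, not teuthology's actual implementation; the function name is a stand-in and it assumes a ceph binary on PATH, but the JSON fields are the ones visible in the dumps above:

import json
import subprocess
import time

def wait_for_service(service_name, timeout=300, interval=1):
    # Poll `ceph orch ls -f json` until running == size for the service.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        out = subprocess.run(
            ["ceph", "orch", "ls", "-f", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        for svc in json.loads(out):
            if svc["service_name"] == service_name:
                status = svc.get("status", {})
                running, size = status.get("running", 0), status.get("size", 0)
                print("%s has %d/%d" % (service_name, running, size))
                if size > 0 and running == size:
                    return
        time.sleep(interval)
    raise TimeoutError("service %s never became fully up" % service_name)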
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:crash 2/2 3s ago 2m *
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:elasticsearch ?:9200 1/1 3s ago 64s count:1
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:grafana ?:3000 1/1 3s ago 2m count:1
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-agent ?:6799 2/2 3s ago 64s *
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-collector ?:14250 1/1 3s ago 64s count:1
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:jaeger-query ?:16686 1/1 3s ago 64s count:1
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:mgr 2/2 3s ago 2m count:2
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:mon 2/2 3s ago 2m vm00:192.168.123.100=vm00;vm01:192.168.123.101=vm01;count:2
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:node-exporter ?:9100 2/2 3s ago 2m *
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:osd.all-available-devices 8 3s ago 104s *
2026-04-13T17:52:40.595 INFO:teuthology.orchestra.run.vm00.stdout:prometheus ?:9095 1/1 3s ago 2m count:1
2026-04-13T17:52:40.636 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch host ls'
2026-04-13T17:52:40.749 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:40.962 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:40 vm00 ceph-mon[51174]: pgmap v81: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:52:40.962 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:40 vm00 ceph-mon[51174]: from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:52:41.065 INFO:teuthology.orchestra.run.vm00.stdout:HOST ADDR LABELS STATUS
2026-04-13T17:52:41.065 INFO:teuthology.orchestra.run.vm00.stdout:vm00 192.168.123.100
2026-04-13T17:52:41.065 INFO:teuthology.orchestra.run.vm00.stdout:vm01 192.168.123.101
2026-04-13T17:52:41.065 INFO:teuthology.orchestra.run.vm00.stdout:2 hosts in cluster
2026-04-13T17:52:41.106 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch device ls'
2026-04-13T17:52:41.220 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:41.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:40 vm01 ceph-mon[56805]: pgmap v81: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail
2026-04-13T17:52:41.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:40 vm01 ceph-mon[56805]: from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:52:41.548 INFO:teuthology.orchestra.run.vm00.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
2026-04-13T17:52:41.548 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme0n1 hdd Linux_8b2c1f5b0b2f0e200b07 19.9G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.548 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme1n1 hdd Linux_3341fccde0b3848ce18b 19.9G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.548 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme2n1 hdd Linux_26e9e0aced76c789d80f 19.9G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/nvme3n1 hdd Linux_c2bf6a0108b89cd03010 19.9G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 78s ago Has a FileSystem, Insufficient space (<5GB)
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdb hdd DWNBRSTVMM00001 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdc hdd DWNBRSTVMM00002 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vdd hdd DWNBRSTVMM00003 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm00 /dev/vde hdd DWNBRSTVMM00004 20.0G No 78s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme0n1 hdd Linux_9e712741119b9c9a8422 19.9G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme1n1 hdd Linux_707733fc50b1c23686f1 19.9G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme2n1 hdd Linux_3680d36b7ca865845e0b 19.9G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/nvme3n1 hdd Linux_1c49275885a38132c982 19.9G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 79s ago Has a FileSystem, Insufficient space (<5GB)
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vdb hdd DWNBRSTVMM01001 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vdc hdd DWNBRSTVMM01002 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vdd hdd DWNBRSTVMM01003 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.549 INFO:teuthology.orchestra.run.vm00.stdout:vm01 /dev/vde hdd DWNBRSTVMM01004 20.0G No 79s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected
2026-04-13T17:52:41.591 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"''
2026-04-13T17:52:41.703 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:42.032 INFO:teuthology.orchestra.run.vm00.stdout:osd.all-available-devices 8 5s ago 106s *
2026-04-13T17:52:42.068 DEBUG:teuthology.run_tasks:Unwinding manager cephadm
2026-04-13T17:52:42.070 INFO:tasks.cephadm:Teardown begin
2026-04-13T17:52:42.070 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:52:42.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:41 vm00 ceph-mon[51174]: from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:52:42.076 INFO:journalctl@ceph.mon.vm00.vm00.stdout:Apr 13 17:52:41 vm00 ceph-mon[51174]: from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:52:42.096 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:52:42.122 INFO:tasks.cephadm:Disabling cephadm mgr module
2026-04-13T17:52:42.122 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-4 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 00063a34-3761-11f1-944c-abe11cccf0ff -- ceph mgr module disable cephadm
2026-04-13T17:52:42.254 INFO:teuthology.orchestra.run.vm00.stderr:Inferring config /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/mon.vm00/config
2026-04-13T17:52:42.273 INFO:teuthology.orchestra.run.vm00.stderr:Error: statfs /etc/ceph/ceph.client.admin.keyring: no such file or directory
2026-04-13T17:52:42.292 DEBUG:teuthology.orchestra.run:got remote process result: 125
2026-04-13T17:52:42.293 INFO:tasks.cephadm:Cleaning up testdir ceph.* files...
2026-04-13T17:52:42.293 DEBUG:teuthology.orchestra.run.vm00:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-04-13T17:52:42.308 DEBUG:teuthology.orchestra.run.vm01:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub
2026-04-13T17:52:42.321 INFO:tasks.cephadm:Stopping all daemons...
2026-04-13T17:52:42.321 INFO:tasks.cephadm.mon.vm00:Stopping mon.vm00...
2026-04-13T17:52:42.321 DEBUG:teuthology.orchestra.run.vm00:> sudo systemctl stop ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00
2026-04-13T17:52:42.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:41 vm01 ceph-mon[56805]: from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:52:42.390 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:41 vm01 ceph-mon[56805]: from='client.14762 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-13T17:52:42.574 DEBUG:teuthology.orchestra.run.vm00:> sudo pkill -f 'journalctl -f -n 0 -u ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm00.service'
2026-04-13T17:52:42.617 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-04-13T17:52:42.617 INFO:tasks.cephadm.mon.vm00:Stopped mon.vm00
2026-04-13T17:52:42.617 INFO:tasks.cephadm.mon.vm01:Stopping mon.vm01...
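[editor's note] The heavily escaped commands above (bash -c 'stat -c '"'"'%u %g'"'"' ...') are standard POSIX single-quote escaping: each embedded quote becomes '"'"'. Python's shlex.quote produces exactly this form, so a wrapper of the kind teuthology uses can be sketched as follows; sh_wrap is a hypothetical helper name:

import shlex

def sh_wrap(script):
    # Quote an arbitrary shell snippet so it survives as the single
    # argument of `bash -c ...`; embedded single quotes come out as the
    # '"'"' sequences visible in the DEBUG lines above.
    return "bash -c " + shlex.quote(script)

print(sh_wrap("stat -c '%u %g' /var/log/ceph | grep '167 167'"))
# bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"''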
2026-04-13T17:52:42.618 DEBUG:teuthology.orchestra.run.vm01:> sudo systemctl stop ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm01
2026-04-13T17:52:42.887 DEBUG:teuthology.orchestra.run.vm01:> sudo pkill -f 'journalctl -f -n 0 -u ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm01.service'
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 systemd[1]: Stopping Ceph mon.vm01 for 00063a34-3761-11f1-944c-abe11cccf0ff...
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01[56801]: 2026-04-13T17:52:42.734+0000 7f431abc5640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.vm01 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01[56801]: 2026-04-13T17:52:42.734+0000 7f431abc5640 -1 mon.vm01@1(peon) e2 *** Got Signal Terminated ***
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 podman[74150]: 2026-04-13 17:52:42.798837437 +0000 UTC m=+0.077972632 container died 72e8ac9a9f7a4da087886c98a151f57086da4cb36d2d81dfad81a57863e8dae8 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360, FROM_IMAGE=rockylinux:9)
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 podman[74150]: 2026-04-13 17:52:42.812678715 +0000 UTC m=+0.091813910 container remove 72e8ac9a9f7a4da087886c98a151f57086da4cb36d2d81dfad81a57863e8dae8 (image=harbor.clyso.com/custom-ceph/ceph/ceph@sha256:04fb1ae5aa13434d0ef747f0e7dd11cd190378f94a382387965098d43e0a0132, name=ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01, FROM_IMAGE=rockylinux:9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, io.buildah.version=1.41.8, CEPH_GIT_REPO=https://github.com/supriti/ceph.git, CEPH_REF=20.2.0-18-g0d1a6d86d0e, CEPH_SHA1=0d1a6d86d0ed4f74ef6e0ad458ad1ff7a2daf360)
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 bash[74150]: ceph-00063a34-3761-11f1-944c-abe11cccf0ff-mon-vm01
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 systemd[1]: ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm01.service: Deactivated successfully.
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 systemd[1]: Stopped Ceph mon.vm01 for 00063a34-3761-11f1-944c-abe11cccf0ff.
2026-04-13T17:52:42.911 INFO:journalctl@ceph.mon.vm01.vm01.stdout:Apr 13 17:52:42 vm01 systemd[1]: ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm01.service: Consumed 1.578s CPU time.
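[editor's note] The unit names in the systemctl stop and pkill lines follow the templated-unit convention visible above, ceph-<fsid>@<daemon_type>.<daemon_id>. A one-liner to reconstruct them; ceph_unit is a hypothetical helper, with the fsid and host taken from the log:

def ceph_unit(fsid, daemon_type, daemon_id):
    # Each cephadm-managed daemon runs as a systemd templated unit instance.
    return "ceph-%s@%s.%s" % (fsid, daemon_type, daemon_id)

print(ceph_unit("00063a34-3761-11f1-944c-abe11cccf0ff", "mon", "vm01"))
# ceph-00063a34-3761-11f1-944c-abe11cccf0ff@mon.vm01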
2026-04-13T17:52:42.929 DEBUG:teuthology.orchestra.run:got remote process result: None
2026-04-13T17:52:42.929 INFO:tasks.cephadm.mon.vm01:Stopped mon.vm01
2026-04-13T17:52:42.930 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 00063a34-3761-11f1-944c-abe11cccf0ff --force --keep-logs
2026-04-13T17:52:43.063 INFO:teuthology.orchestra.run.vm00.stdout:Deleting cluster with fsid: 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:53:10.162 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 00063a34-3761-11f1-944c-abe11cccf0ff --force --keep-logs
2026-04-13T17:53:10.284 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:53:36.955 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:53:36.984 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring
2026-04-13T17:53:37.010 INFO:tasks.cephadm:Archiving crash dumps...
2026-04-13T17:53:37.010 DEBUG:teuthology.misc:Transferring archived files from vm00:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/crash to /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194/remote/vm00/crash
2026-04-13T17:53:37.010 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/crash -- .
2026-04-13T17:53:37.048 INFO:teuthology.orchestra.run.vm00.stderr:tar: /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/crash: Cannot open: No such file or directory
2026-04-13T17:53:37.048 INFO:teuthology.orchestra.run.vm00.stderr:tar: Error is not recoverable: exiting now
2026-04-13T17:53:37.049 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/crash to /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194/remote/vm01/crash
2026-04-13T17:53:37.049 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/crash -- .
2026-04-13T17:53:37.074 INFO:teuthology.orchestra.run.vm01.stderr:tar: /var/lib/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/crash: Cannot open: No such file or directory
2026-04-13T17:53:37.074 INFO:teuthology.orchestra.run.vm01.stderr:tar: Error is not recoverable: exiting now
2026-04-13T17:53:37.075 INFO:tasks.cephadm:Checking cluster log for badness...
2026-04-13T17:53:37.075 DEBUG:teuthology.orchestra.run.vm00:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1
2026-04-13T17:53:37.112 INFO:tasks.cephadm:Compressing logs...
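[editor's note] The "Checking cluster log for badness..." pipeline above keeps [ERR]/[WRN]/[SEC] lines, restricts them to the log-only-match pattern (CEPHADM_), subtracts each log-ignorelist pattern, and reports at most one offender. A sketch of how such a command could be assembled from those lists; badness_cmd is a hypothetical helper, with the patterns copied from the command above:

import shlex

def badness_cmd(cluster_log, only_match, ignorelist):
    # Build the egrep pipeline: severity filter, required matches,
    # ignored patterns, then at most one offending line.
    sev = r'\[ERR\]|\[WRN\]|\[SEC\]'
    parts = ["sudo egrep %s %s" % (shlex.quote(sev), shlex.quote(cluster_log))]
    parts += ["egrep %s" % shlex.quote(p) for p in only_match]
    parts += ["egrep -v %s" % shlex.quote(p) for p in ignorelist]
    parts.append("head -n 1")
    return " | ".join(parts)

print(badness_cmd(
    "/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log",
    only_match=["CEPHADM_"],
    ignorelist=[r"\(MDS_ALL_DOWN\)", r"\(MDS_UP_LESS_THAN_MAX\)",
                "CEPHADM_DAEMON_PLACE_FAIL", "CEPHADM_FAILED_DAEMON"],
))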
2026-04-13T17:53:37.113 DEBUG:teuthology.orchestra.run.vm00:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-04-13T17:53:37.155 DEBUG:teuthology.orchestra.run.vm01:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --
2026-04-13T17:53:37.176 INFO:teuthology.orchestra.run.vm00.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-04-13T17:53:37.176 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-04-13T17:53:37.177 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mon.vm00.log
2026-04-13T17:53:37.177 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log
2026-04-13T17:53:37.178 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/cephadm.log: /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mon.vm00.log: gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mgr.vm00.vrvkmc.log
2026-04-13T17:53:37.178 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.audit.log
2026-04-13T17:53:37.178 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log: /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mgr.vm00.vrvkmc.log: 84.2% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log.gz
2026-04-13T17:53:37.178 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.cephadm.log
2026-04-13T17:53:37.179 INFO:teuthology.orchestra.run.vm01.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory
2026-04-13T17:53:37.179 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log
2026-04-13T17:53:37.180 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.audit.log: 90.7% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.audit.log.gz
2026-04-13T17:53:37.180 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-volume.log
2026-04-13T17:53:37.180 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-volume.log
2026-04-13T17:53:37.181 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.cephadm.log: 83.0% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.cephadm.log.gz
2026-04-13T17:53:37.181 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-client.ceph-exporter.vm00.log
2026-04-13T17:53:37.181 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-client.ceph-exporter.vm01.log
2026-04-13T17:53:37.181 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mgr.vm01.qjjyaa.log
2026-04-13T17:53:37.181 INFO:teuthology.orchestra.run.vm01.stderr:gzip/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-volume.log: -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mon.vm01.log
2026-04-13T17:53:37.181 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-client.ceph-exporter.vm01.log: 91.0% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-client.ceph-exporter.vm01.log.gz
2026-04-13T17:53:37.182 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mgr.vm01.qjjyaa.log: 92.4% -- replaced with /var/log/ceph/cephadm.log.gz
2026-04-13T17:53:37.182 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.audit.log
2026-04-13T17:53:37.182 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mon.vm01.log: gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log
2026-04-13T17:53:37.183 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.audit.log: 90.7% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.audit.log.gz
2026-04-13T17:53:37.183 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.cephadm.log
2026-04-13T17:53:37.184 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log: 83.5% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.log.gz
2026-04-13T17:53:37.184 INFO:teuthology.orchestra.run.vm01.stderr: 90.4% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mgr.vm01.qjjyaa.log.gz
2026-04-13T17:53:37.184 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.0.log
2026-04-13T17:53:37.185 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.2.log
2026-04-13T17:53:37.185 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.cephadm.log: 82.1% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph.cephadm.log.gz
2026-04-13T17:53:37.185 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.0.log: gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.4.log
2026-04-13T17:53:37.190 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.1.log
2026-04-13T17:53:37.190 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-client.ceph-exporter.vm00.log: 90.8% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-client.ceph-exporter.vm00.log.gz
2026-04-13T17:53:37.191 INFO:teuthology.orchestra.run.vm00.stderr: 96.0% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-volume.log.gz
2026-04-13T17:53:37.191 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.3.log
2026-04-13T17:53:37.193 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.2.log: 96.0% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-volume.log.gz
2026-04-13T17:53:37.193 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.6.log
2026-04-13T17:53:37.198 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.1.log: 92.3% -- replaced with /var/log/ceph/cephadm.log.gz
2026-04-13T17:53:37.200 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.5.log
2026-04-13T17:53:37.205 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.3.log: 93.0% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.1.log.gz
2026-04-13T17:53:37.205 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.7.log
2026-04-13T17:53:37.210 INFO:teuthology.orchestra.run.vm01.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.4.log: /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.6.log: 93.6% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.0.log.gz
2026-04-13T17:53:37.212 INFO:teuthology.orchestra.run.vm01.stderr: 92.3% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mon.vm01.log.gz
2026-04-13T17:53:37.213 INFO:teuthology.orchestra.run.vm01.stderr: 92.9% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.4.log.gz
2026-04-13T17:53:37.217 INFO:teuthology.orchestra.run.vm00.stderr:/var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.5.log: /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.7.log: 92.8% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.3.log.gz
2026-04-13T17:53:37.227 INFO:teuthology.orchestra.run.vm01.stderr: 93.0% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.6.log.gz
2026-04-13T17:53:37.233 INFO:teuthology.orchestra.run.vm00.stderr: 93.5% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.5.log.gz
2026-04-13T17:53:37.235 INFO:teuthology.orchestra.run.vm00.stderr: 93.2% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.7.log.gz
2026-04-13T17:53:37.241 INFO:teuthology.orchestra.run.vm01.stderr: 94.1% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-osd.2.log.gz
2026-04-13T17:53:37.243 INFO:teuthology.orchestra.run.vm01.stderr:
2026-04-13T17:53:37.243 INFO:teuthology.orchestra.run.vm01.stderr:real 0m0.074s
2026-04-13T17:53:37.243 INFO:teuthology.orchestra.run.vm01.stderr:user 0m0.157s
2026-04-13T17:53:37.243 INFO:teuthology.orchestra.run.vm01.stderr:sys 0m0.031s
2026-04-13T17:53:37.244 INFO:teuthology.orchestra.run.vm00.stderr: 89.9% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mgr.vm00.vrvkmc.log.gz
2026-04-13T17:53:37.282 INFO:teuthology.orchestra.run.vm00.stderr: 91.3% -- replaced with /var/log/ceph/00063a34-3761-11f1-944c-abe11cccf0ff/ceph-mon.vm00.log.gz
2026-04-13T17:53:37.284 INFO:teuthology.orchestra.run.vm00.stderr:
2026-04-13T17:53:37.284 INFO:teuthology.orchestra.run.vm00.stderr:real 0m0.118s
2026-04-13T17:53:37.284 INFO:teuthology.orchestra.run.vm00.stderr:user 0m0.267s
2026-04-13T17:53:37.284 INFO:teuthology.orchestra.run.vm00.stderr:sys 0m0.028s
2026-04-13T17:53:37.285 INFO:tasks.cephadm:Archiving logs...
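[editor's note] The jumbled gzip output above is expected rather than corruption: the find | xargs command starts one gzip per file with unlimited parallelism (--max-procs=0), so the --verbose reports from concurrent processes interleave on a shared stderr. A rough serial stand-in using only the Python stdlib (no sudo, ordered output; the directory argument is illustrative):

import gzip
import shutil
from pathlib import Path

def compress_logs(root="/var/log/ceph"):
    # Serial equivalent of `find ... -name '*.log' | xargs gzip -5 --verbose`:
    # compress each .log file, then remove the original as gzip does.
    for log in Path(root).rglob("*.log"):
        with open(log, "rb") as src, gzip.open(str(log) + ".gz", "wb", compresslevel=5) as dst:
            shutil.copyfileobj(src, dst)
        log.unlink()
        print("%s -- replaced with %s.gz" % (log, log))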
2026-04-13T17:53:37.285 DEBUG:teuthology.misc:Transferring archived files from vm00:/var/log/ceph to /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194/remote/vm00/log
2026-04-13T17:53:37.285 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /var/log/ceph -- .
2026-04-13T17:53:37.362 DEBUG:teuthology.misc:Transferring archived files from vm01:/var/log/ceph to /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194/remote/vm01/log
2026-04-13T17:53:37.363 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /var/log/ceph -- .
2026-04-13T17:53:37.396 INFO:tasks.cephadm:Removing cluster...
2026-04-13T17:53:37.396 DEBUG:teuthology.orchestra.run.vm00:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 00063a34-3761-11f1-944c-abe11cccf0ff --force
2026-04-13T17:53:37.523 INFO:teuthology.orchestra.run.vm00.stdout:Deleting cluster with fsid: 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:53:37.578 DEBUG:teuthology.orchestra.run.vm01:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 00063a34-3761-11f1-944c-abe11cccf0ff --force
2026-04-13T17:53:37.704 INFO:teuthology.orchestra.run.vm01.stdout:Deleting cluster with fsid: 00063a34-3761-11f1-944c-abe11cccf0ff
2026-04-13T17:53:37.755 INFO:tasks.cephadm:Removing cephadm ...
2026-04-13T17:53:37.755 DEBUG:teuthology.orchestra.run.vm00:> rm -rf /home/ubuntu/cephtest/cephadm
2026-04-13T17:53:37.769 DEBUG:teuthology.orchestra.run.vm01:> rm -rf /home/ubuntu/cephtest/cephadm
2026-04-13T17:53:37.783 INFO:tasks.cephadm:Teardown complete
2026-04-13T17:53:37.783 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop
2026-04-13T17:53:37.785 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_1...
2026-04-13T17:53:37.785 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_1
2026-04-13T17:53:37.934 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_1 disconnected 1 controller(s)
2026-04-13T17:53:37.935 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_2...
2026-04-13T17:53:37.935 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_2
2026-04-13T17:53:38.053 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_2 disconnected 1 controller(s)
2026-04-13T17:53:38.054 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_3...
2026-04-13T17:53:38.054 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_3
2026-04-13T17:53:38.168 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_3 disconnected 1 controller(s)
2026-04-13T17:53:38.169 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_4...
2026-04-13T17:53:38.169 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_4
2026-04-13T17:53:38.281 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_4 disconnected 1 controller(s)
2026-04-13T17:53:38.282 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:53:38.282 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/scratch_devs
2026-04-13T17:53:38.302 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_1...
2026-04-13T17:53:38.302 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_1
2026-04-13T17:53:38.465 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_1 disconnected 1 controller(s)
2026-04-13T17:53:38.467 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_2...
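[editor's note] The nvme_loop teardown above disconnects each loopback NVMe target by its NQN, one `nvme disconnect -n <lv>` per logical volume. A minimal sketch mirroring those commands (assumes nvme-cli is installed and the targets exist; the LV names are the ones in the log):

import subprocess

for lv in ("lv_1", "lv_2", "lv_3", "lv_4"):
    # One disconnect per loopback target, matching the DEBUG lines above.
    out = subprocess.run(
        ["sudo", "nvme", "disconnect", "-n", lv],
        capture_output=True, text=True, check=True,
    ).stdout
    print(out.strip())  # e.g. "NQN:lv_1 disconnected 1 controller(s)"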
2026-04-13T17:53:37.785 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_1...
2026-04-13T17:53:37.785 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_1
2026-04-13T17:53:37.934 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_1 disconnected 1 controller(s)
2026-04-13T17:53:37.935 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_2...
2026-04-13T17:53:37.935 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_2
2026-04-13T17:53:38.053 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_2 disconnected 1 controller(s)
2026-04-13T17:53:38.054 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_3...
2026-04-13T17:53:38.054 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_3
2026-04-13T17:53:38.168 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_3 disconnected 1 controller(s)
2026-04-13T17:53:38.169 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm00:/dev/vg_nvme/lv_4...
2026-04-13T17:53:38.169 DEBUG:teuthology.orchestra.run.vm00:> sudo nvme disconnect -n lv_4
2026-04-13T17:53:38.281 INFO:teuthology.orchestra.run.vm00.stdout:NQN:lv_4 disconnected 1 controller(s)
2026-04-13T17:53:38.282 DEBUG:teuthology.orchestra.run.vm00:> set -ex
2026-04-13T17:53:38.282 DEBUG:teuthology.orchestra.run.vm00:> sudo dd of=/scratch_devs
2026-04-13T17:53:38.302 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_1...
2026-04-13T17:53:38.302 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_1
2026-04-13T17:53:38.465 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_1 disconnected 1 controller(s)
2026-04-13T17:53:38.467 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_2...
2026-04-13T17:53:38.467 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_2
2026-04-13T17:53:38.594 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_2 disconnected 1 controller(s)
2026-04-13T17:53:38.596 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_3...
2026-04-13T17:53:38.596 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_3
2026-04-13T17:53:38.707 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_3 disconnected 1 controller(s)
2026-04-13T17:53:38.709 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm01:/dev/vg_nvme/lv_4...
2026-04-13T17:53:38.709 DEBUG:teuthology.orchestra.run.vm01:> sudo nvme disconnect -n lv_4
2026-04-13T17:53:38.823 INFO:teuthology.orchestra.run.vm01.stdout:NQN:lv_4 disconnected 1 controller(s)
2026-04-13T17:53:38.825 DEBUG:teuthology.orchestra.run.vm01:> set -ex
2026-04-13T17:53:38.825 DEBUG:teuthology.orchestra.run.vm01:> sudo dd of=/scratch_devs
2026-04-13T17:53:38.845 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-04-13T17:53:38.848 INFO:teuthology.task.clock:Checking final clock skew...
2026-04-13T17:53:38.848 DEBUG:teuthology.orchestra.run.vm00:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-13T17:53:38.850 DEBUG:teuthology.orchestra.run.vm01:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-13T17:53:38.861 INFO:teuthology.orchestra.run.vm00.stderr:bash: line 1: ntpq: command not found
2026-04-13T17:53:38.865 INFO:teuthology.orchestra.run.vm00.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-04-13T17:53:38.865 INFO:teuthology.orchestra.run.vm00.stdout:===============================================================================
2026-04-13T17:53:38.865 INFO:teuthology.orchestra.run.vm00.stdout:^+ mail.light-speed.de 2 6 377 47 -378us[ -378us] +/- 18ms
2026-04-13T17:53:38.865 INFO:teuthology.orchestra.run.vm00.stdout:^+ s7.vonderste.in 2 6 377 47 +2806us[+2847us] +/- 18ms
2026-04-13T17:53:38.865 INFO:teuthology.orchestra.run.vm00.stdout:^* time.cloudflare.com 3 6 377 47 -1139us[-1099us] +/- 15ms
2026-04-13T17:53:38.865 INFO:teuthology.orchestra.run.vm00.stdout:^+ mail2.light-speed.de 2 6 377 44 -132us[ -132us] +/- 18ms
2026-04-13T17:53:38.900 INFO:teuthology.orchestra.run.vm01.stderr:bash: line 1: ntpq: command not found
2026-04-13T17:53:38.902 INFO:teuthology.orchestra.run.vm01.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-04-13T17:53:38.903 INFO:teuthology.orchestra.run.vm01.stdout:===============================================================================
2026-04-13T17:53:38.903 INFO:teuthology.orchestra.run.vm01.stdout:^* mail2.light-speed.de 2 6 377 44 -85us[ -147us] +/- 18ms
2026-04-13T17:53:38.903 INFO:teuthology.orchestra.run.vm01.stdout:^+ mail.light-speed.de 2 6 377 47 -353us[ -415us] +/- 18ms
2026-04-13T17:53:38.903 INFO:teuthology.orchestra.run.vm01.stdout:^+ time.cloudflare.com 3 6 373 47 -1268us[-1330us] +/- 15ms
2026-04-13T17:53:38.903 INFO:teuthology.orchestra.run.vm01.stdout:^+ s7.vonderste.in 2 6 373 45 +2929us[+2867us] +/- 18ms
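The clock task's final skew check is deliberately daemon-agnostic: it tries ntpq first, falls back to chronyc (which is what answered on these CentOS 9 hosts, hence the 'ntpq: command not found' noise above), and never fails the run if neither tool is present. The probe, exactly as run on both nodes:

    # Prefer ntpd's ntpq, fall back to chrony, and swallow the error if
    # neither time daemon is installed.
    PATH=/usr/bin:/usr/sbin ntpq -p \
        || PATH=/usr/bin:/usr/sbin chronyc sources \
        || true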
2026-04-13T17:53:38.903 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-04-13T17:53:38.905 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-04-13T17:53:38.905 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-04-13T17:53:38.908 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-04-13T17:53:38.910 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-04-13T17:53:38.912 INFO:teuthology.task.internal:Duration was 402.484786 seconds
2026-04-13T17:53:38.912 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-04-13T17:53:38.915 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-04-13T17:53:38.915 DEBUG:teuthology.orchestra.run.vm00:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-04-13T17:53:38.917 DEBUG:teuthology.orchestra.run.vm01:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-04-13T17:53:38.951 INFO:teuthology.orchestra.run.vm00.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-04-13T17:53:38.977 INFO:teuthology.orchestra.run.vm01.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-04-13T17:53:39.427 INFO:teuthology.task.internal.syslog:Checking logs for errors...
2026-04-13T17:53:39.428 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm00.local
2026-04-13T17:53:39.428 DEBUG:teuthology.orchestra.run.vm00:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-04-13T17:53:39.448 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm01.local
2026-04-13T17:53:39.448 DEBUG:teuthology.orchestra.run.vm01:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
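The two syslog scans above share one shape: a leading grep selects candidate kernel messages, a chain of grep -v filters drops known-benign matches, and head -n 1 keeps only the first survivor, so any output at all marks the node as having a kernel problem. A skeleton of the pipeline (the benign patterns here are illustrative placeholders; the real list is the long chain above):

    grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' kern.log |
        grep -v 'known-benign-pattern-1' |   # one grep -v per ignore rule
        grep -v 'known-benign-pattern-2' |
        head -n 1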
2026-04-13T17:53:39.488 INFO:teuthology.task.internal.syslog:Gathering journalctl...
2026-04-13T17:53:39.488 DEBUG:teuthology.orchestra.run.vm00:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-13T17:53:39.490 DEBUG:teuthology.orchestra.run.vm01:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-13T17:53:40.005 INFO:teuthology.task.internal.syslog:Compressing syslogs...
2026-04-13T17:53:40.005 DEBUG:teuthology.orchestra.run.vm00:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-04-13T17:53:40.006 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-04-13T17:53:40.027 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-13T17:53:40.027 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-13T17:53:40.027 INFO:teuthology.orchestra.run.vm00.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-13T17:53:40.027 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-04-13T17:53:40.027 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-04-13T17:53:40.028 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-13T17:53:40.029 INFO:teuthology.orchestra.run.vm01.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-13T17:53:40.029 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-04-13T17:53:40.029 INFO:teuthology.orchestra.run.vm01.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-04-13T17:53:40.029 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-04-13T17:53:40.147 INFO:teuthology.orchestra.run.vm01.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 98.3% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-04-13T17:53:40.176 INFO:teuthology.orchestra.run.vm00.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.8% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
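One subtlety in the journalctl capture above: the > redirection is evaluated by the unprivileged remote shell, not by sudo, so the capture only works because the syslog archive directory is writable by the test user:

    # journalctl runs as root via sudo, but the output file is created by
    # the calling (ubuntu) user's shell.
    sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log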
2026-04-13T17:53:40.178 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-04-13T17:53:40.181 INFO:teuthology.task.internal:Restoring /etc/sudoers...
2026-04-13T17:53:40.181 DEBUG:teuthology.orchestra.run.vm00:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-13T17:53:40.243 DEBUG:teuthology.orchestra.run.vm01:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-13T17:53:40.267 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-04-13T17:53:40.270 DEBUG:teuthology.orchestra.run.vm00:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-13T17:53:40.285 DEBUG:teuthology.orchestra.run.vm01:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-13T17:53:40.308 INFO:teuthology.orchestra.run.vm00.stdout:kernel.core_pattern = core
2026-04-13T17:53:40.333 INFO:teuthology.orchestra.run.vm01.stdout:kernel.core_pattern = core
2026-04-13T17:53:40.347 DEBUG:teuthology.orchestra.run.vm00:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-13T17:53:40.376 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-13T17:53:40.377 DEBUG:teuthology.orchestra.run.vm01:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-13T17:53:40.402 DEBUG:teuthology.orchestra.run:got remote process result: 1
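The coredump unwind restores the default core pattern, deletes any cores attributed to systemd-sysusers (a known-noisy source), and removes the directory only if it ends up empty. The 'got remote process result: 1' lines above are therefore the success case: test -e failing means the directory is gone and no real cores were collected. The sequence each node ran:

    sudo sysctl -w kernel.core_pattern=core
    # Drop only cores produced by systemd-sysusers; keep everything else.
    sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done'
    rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
    test -e /home/ubuntu/cephtest/archive/coredump   # exit 1 == nothing kept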
2026-04-13T17:53:40.403 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-04-13T17:53:40.405 INFO:teuthology.task.internal:Transferring archived files...
2026-04-13T17:53:40.405 DEBUG:teuthology.misc:Transferring archived files from vm00:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194/remote/vm00
2026-04-13T17:53:40.406 DEBUG:teuthology.orchestra.run.vm00:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-13T17:53:40.446 DEBUG:teuthology.misc:Transferring archived files from vm01:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-13_15:28:06-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5194/remote/vm01
2026-04-13T17:53:40.446 DEBUG:teuthology.orchestra.run.vm01:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-13T17:53:40.475 INFO:teuthology.task.internal:Removing archive directory...
2026-04-13T17:53:40.475 DEBUG:teuthology.orchestra.run.vm00:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-13T17:53:40.487 DEBUG:teuthology.orchestra.run.vm01:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-13T17:53:40.532 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-04-13T17:53:40.535 INFO:teuthology.task.internal:Not uploading archives.
2026-04-13T17:53:40.535 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-04-13T17:53:40.538 INFO:teuthology.task.internal:Tidying up after the test...
2026-04-13T17:53:40.538 DEBUG:teuthology.orchestra.run.vm00:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-13T17:53:40.542 DEBUG:teuthology.orchestra.run.vm01:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-13T17:53:40.556 INFO:teuthology.orchestra.run.vm00.stdout: 8532146 0 drwxr-xr-x 2 ubuntu ubuntu 6 Apr 13 17:53 /home/ubuntu/cephtest
2026-04-13T17:53:40.591 INFO:teuthology.orchestra.run.vm01.stdout: 8532140 0 drwxr-xr-x 2 ubuntu ubuntu 6 Apr 13 17:53 /home/ubuntu/cephtest
2026-04-13T17:53:40.592 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-04-13T17:53:40.600 INFO:teuthology.run:Summary data:
description: orch:cephadm:smoke-roleless/{0-distro/centos_9.stream 0-nvme-loop 1-start 2-services/jaeger 3-final}
duration: 402.48478603363037
owner: supriti
success: true
2026-04-13T17:53:40.601 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-13T17:53:40.625 INFO:teuthology.run:pass